[
  {
    "path": ".gitignore",
    "content": "## Unimportant file\n.idea/\n*.ini\n*.lnk\n*.c\nrelease\nserver_config\n.venv\npython\nsystem\nvenv\nTHUDM\nBelleGroup\n\n*egg-info\nconfigs/\nbuild/\ndist/\nmodels/\nsample/\nweights/\ntemp/\nlog/\nlogs/\n*/__pycache__/\nwork_dirs/\n\n*log.txt\n*.cpth\n*.ctrt\n*.engine\n\n*.build\n*.txt\n*.log\n*.jpg\n*.bmp\n*.png\n*.tif\n*.tiff\n*.pdf\n*.json\n*.jsonl\n*.arrow\ndata\nruns\n\nThumbs.db\n.DS_Store\n\n# Byte-compiled / optimized / DLL files\n\n__pycache__/\n*.py[cod]\n*$py.class\n\n# Deployment\n\n*.bin\n*.zip\n*.pyd\n*.pyi\n\n# C extensions\n\n*.so\n*.xlsx\n\n# Distribution / packaging\n\n.Python build/ develop-eggs/ release/ standalone/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/\n*.egg-info/ .installed.cfg\n*.egg MANIFEST\n\n# PyInstaller\n\n# Usually these files are written by a python script from a template\n\n# before PyInstaller builds the exe, so as to inject date/other infos into it.\n\n*.manifest\n*.spec\n\n# Installer logs\n\npip-log.txt pip-delete-this-directory.txt\n\n# Unit test / coverage reports\n\nhtmlcov/ .tox/ .coverage .coverage.*\n.cache nosetests.xml coverage.xml\n*.cover .hypothesis/ .pytest_cache/\n\n# Translations\n\n*.mo\n*.pot\n\n# Django stuff:\n\n*.log local_settings.py db.sqlite3\n\n# Flask stuff:\n\ninstance/ .webassets-cache\n\n# Scrapy stuff:\n\n.scrapy\n\n# Sphinx documentation\n\ndocs/_build/\n\n# PyBuilder\n\ntarget/\n\n# Jupyter Notebook\n\n.ipynb_checkpoints\n\n# pyenv\n\n.python-version\n\n# celery beat schedule file\n\ncelerybeat-schedule\n\n# SageMath parsed files\n\n*.sage.py\n\n# Environments\n\n.env .venv env/ venv/ ENV/ env.bak/ venv.bak/\n\n# Spyder project settings\n\n.spyderproject .spyproject\n\n# Rope project settings\n\n.ropeproject\n\n# mkdocs documentation\n\n/site\n\n# mypy\n\n.mypy_cache/\n\ndata/ data .vscode .idea .DS_Store\n\n# custom\n\n*.pkl\n*.pkl.json\n*.log.json work_dirs/\n\n# Pytorch\n\n*.pth\n*.pt\n*.py~\n*.sh~\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "MODEL_LICENSE",
    "content": "The ChatGLM-6B License\n\n1. Definitions\n\n“Licensor” means the ChatGLM-6B Model Team that distributes its Software.\n\n“Software” means the ChatGLM-6B model parameters made available under this license.\n\n2. License Grant\n\nSubject to the terms and conditions of this License, the Licensor hereby grants to you a non-exclusive, worldwide, non-transferable, non-sublicensable, revocable, royalty-free copyright license to use the Software solely for your non-commercial research purposes.\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\n3. Restriction\n\nYou will not use, copy, modify, merge, publish, distribute, reproduce, or create derivative works of the Software, in whole or in part, for any commercial, military, or illegal purposes.\n\nYou will not use the Software for any act that may undermine China's national security and national unity, harm the public interest of society, or infringe upon the rights and interests of human beings.\n\n4. Disclaimer\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n5. Limitation of Liability\n\nEXCEPT TO THE EXTENT PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL THEORY, WHETHER BASED IN TORT, NEGLIGENCE, CONTRACT, LIABILITY, OR OTHERWISE WILL ANY LICENSOR BE LIABLE TO YOU FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES, OR ANY OTHER COMMERCIAL LOSSES, EVEN IF THE LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.\n\n6. Dispute Resolution\n\nThis license shall be governed and construed in accordance with the laws of People’s Republic of China. Any dispute arising from or in connection with this License shall be submitted to Haidian District People's Court in Beijing.\n\nNote that the license is subject to update to a more comprehensive version.  For any questions related to the license and copyright, please contact us at glm-130b@googlegroups.com.\n"
  },
  {
    "path": "README.md",
    "content": "# 💡Creative ChatGLM WebUI\n\n👋 欢迎来到 ChatGLM 创意世界！你可以使用修订和续写的功能来生成创意内容！\n\n* 📖 你可以使用“续写”按钮帮 ChatGLM 想一个开头，并让它继续生成更多的内容。\n* 📝 你可以使用“修订”按钮修改最后一句 ChatGLM 的回复。\n\n# 环境配置\n\n## 离线包\n\n此安装方法适合：\n\n* 非开发人员，不需要写代码\n* 没有Python经验，不会搭建环境\n* 网络环境不好，配置环境、下载模型速度慢\n\n| 名称           | 大小      | 百度网盘 | 备注                                     |\n|--------------|---------| ---- |----------------------------------------|\n| **小显存离线包**   | 5.3 GB  | [点击下载](https://pan.baidu.com/s/1fI1JWBE7KP7cJsoD-dL38g?pwd=cglm) | chatglm2-6b-int4 离线包，显存需求 8GB          |\n| 大显存离线包       | 11.5 GB | [点击下载](https://pan.baidu.com/s/10oUwW2DUMDFk3RuIkaqGbA?pwd=cglm) | chatglm3-6b 离线包，显存需求 16GB              |\n| 长文本离线包       | 11.5 GB | [点击下载](https://pan.baidu.com/s/1kbeTdPcUmYd16IE0stXnTA?pwd=cglm) | chatglm3-6b-128k 离线包，显存需求 16GB         |\n| **GLM4 离线包** | 16.98GB | [点击下载](https://pan.baidu.com/s/1iGCzB5DO2sGCzKtARvTXnw?pwd=cglm) | GLM-4-9B 离线包，INT4 加载，显存需求 10GB          |\n| 环境离线包        | 2.6 GB  | [点击下载](https://pan.baidu.com/s/1Kt9eZlgXJ03bVwIM22IR6w?pwd=cglm) | 不带权重的环境包，启动之后自动下载 chatglm2-6b-int4 权重。 |\n\n除了这些一键环境包之外，你还可以在下面下载更多模型的权重。\n\n* 百度网盘链接：[https://pan.baidu.com/s/1pnIEj66scZOswHm8oivXmw?pwd=cglm](https://pan.baidu.com/s/1pnIEj66scZOswHm8oivXmw?pwd=cglm)\n\n下载好环境包之后，解压，然后运行 `start_offline.bat` 脚本，即可启动服务：\n\n![Snipaste_2024-04-07_16-14-54](https://github.com/ypwhs/CreativeChatGLM/assets/10473170/7ab0c0e8-9ada-43ad-b237-ba0f2008bdac)\n\n如果你想使用 API 的形式来调用，可以运行 `start_offline_api.bat` 启动 API 服务：\n\n![Snipaste_2024-04-07_16-16-01](https://github.com/ypwhs/CreativeChatGLM/assets/10473170/959c1742-da2e-4b37-b40f-6fb96c53ab89)\n\n## 虚拟环境\n\n此安装方法适合已经安装了 Python，但是希望环境与系统已安装的 Python 环境隔离的用户。\n\n<details><summary>点击查看详细步骤</summary>\n\n首先启动 `setup_venv.bat` 脚本，安装环境：\n\n![image](https://user-images.githubusercontent.com/10473170/227982667-a8090ffa-f836-4ebc-93a1-91ab39d9259b.png)\n\n然后使用 `start_venv.bat` 脚本启动服务：\n\n![image](https://user-images.githubusercontent.com/10473170/227983154-27ed9751-b9c3-44ec-9583-31f192955b11.png)\n\n</details>\n\n## Python 开发环境\n\n此项配置方法适合代码开发人员，使用的是自己系统里安装的 Python。\n\n环境配置参考官方链接：[https://github.com/THUDM/ChatGLM-6B](https://github.com/THUDM/ChatGLM-6B)\n\n配置好之后，运行 `app.py` 开始使用，或者使用 IDE 开始开发。\n\n# 用法介绍\n\n## 续写\n\n### 原始对话\n\n如果你直接问 ChatGLM：“你几岁了？”\n\n它只会回答：“作为一个人工智能语言模型，我没有年龄，我只是一个正在不断学习和进化的程序。”\n\n<img width=\"388\" alt=\"image\" src=\"https://user-images.githubusercontent.com/10473170/227778266-e7f2b55a-59de-4eee-bfa2-f28f911ec018.png\">\n\n### 续写对话\n\n而如果你给它起个头：“我今年”\n\n它就会回答：“我今年21岁。”\n\n<img width=\"388\" alt=\"image\" src=\"https://user-images.githubusercontent.com/10473170/227778334-d459ad8d-7c16-466d-851c-5af174216773.png\">\n\n### 使用视频\n\n![ChatGLM2](https://user-images.githubusercontent.com/10473170/227778636-a8fcd650-eeeb-44e5-8f24-9260b27cce5d.gif)\n\n## 修订\n\n### 原始对话\n\n如果你直接跟 ChatGLM 说：“你是谁？”\n\n它会回答：“我是一个名为 ChatGLM-6B 的人工智能助手，是基于清华大学 KEG 实验室和智谱 AI 公司于 2023 年共同训练的语言模型开发的。我的任务是针对用户的问题和要求提供适当的答复和支持。”\n\n你再问它：“你几岁了？”\n\n它只会说：“作为一个人工智能助手，我没有年龄，因为我只是一个程序，没有实际的肉体或生命。我只是一个在计算机上运行的程序，专门设计为回答用户的问题和提供相关的帮助。”\n\n![image](https://user-images.githubusercontent.com/10473170/227777039-75b9dfb6-9b83-45af-8555-c3a27808c683.png)\n\n### 修改对话\n\n你可以改变它的角色，比如你通过“修订”功能，将它的回复改成：“我是杨开心。”\n\n然后你再问它：“你几岁了？”\n\n它就会回答：“我今年15岁。”\n\n![image](https://user-images.githubusercontent.com/10473170/227777136-e2a176f8-6742-41a9-abaf-72a9540b834d.png)\n\n### 使用视频\n\n![未命名项目](https://user-images.githubusercontent.com/10473170/227777930-6aa5981a-0695-40c7-b083-b76bb063c481.gif)\n\n### 
# How the offline packages are built\n\nFor how the offline packages in this project are built, see the detailed steps below.\n\n<details><summary>Click for detailed steps</summary>\n\n## Prepare Python\n\nFirst download Python from the official site: [https://www.python.org/downloads/](https://www.python.org/downloads/)\n\n![image](https://user-images.githubusercontent.com/10473170/229679144-86d96c5c-58e0-4a54-9657-ccfe37943c6e.png)\n\nMake sure to download the `Windows embeddable package (64-bit)`; I chose [python-3.10.10-embed-amd64.zip](https://www.python.org/ftp/python/3.10.10/python-3.10.10-embed-amd64.zip).\n\n![image](https://user-images.githubusercontent.com/10473170/229679189-1f8b2032-c92c-47ee-ba25-147f4acbf90f.png)\n\nUnzip it into the `./system/python` directory.\n\n![image](https://user-images.githubusercontent.com/10473170/229679264-b3633920-757f-4ab8-b9f8-e79a21036146.png)\n\n## Prepare get-pip.py\n\nDownload it from the official site: [https://bootstrap.pypa.io/get-pip.py](https://bootstrap.pypa.io/get-pip.py)\n\nSave it into the `./system/python` directory.\n\n## ⚠️ Required step\n\nAfter unzipping, remember to delete the pth file, to avoid problems when installing dependencies.\n\nFor example, the file I deleted was `./system/python/python310._pth`.\n\n![image](https://user-images.githubusercontent.com/10473170/229679450-7acc005d-8203-4dd6-8be9-fa546aeaa2bf.png)\n\n## Install dependencies\n\nRun the [setup_offline.bat](setup_offline.bat) script to install the dependencies.\n\n![image](https://user-images.githubusercontent.com/10473170/229679544-162b8db1-851f-47f0-af54-675c6a710b42.png)\n\n## Download models for offline use\n\nYou can download models with the [download_model.py](download_model.py) script; if your network connection is poor, this can take a long time. The downloaded model is stored once under `~/.cache` and once under `./models`.\n\nWhen you later load the model with `AutoModel.from_pretrained`, it can be loaded from the `~/.cache` cache directory, avoiding a second download.\n\n![image](https://user-images.githubusercontent.com/10473170/229679938-44486557-dbc7-4e0b-9793-acfb6c46459e.png)\n\nMove the downloaded model out of the `./models` folder into the project root directory, so it can be loaded offline.\n\n![image](https://user-images.githubusercontent.com/10473170/229680125-6af06b25-3d26-49cc-969b-4f6154c522de.png)\n\nAfter downloading a model, change `model_name` in [app.py](app.py) to the name of the model you want to load.\n\n## Test\n\nStart the service with [start_offline.bat](start_offline.bat):\n\n![image](https://user-images.githubusercontent.com/10473170/229680404-0b28dfd4-382e-4cfc-9392-997f134c0242.png)\n\nAs you can see, the service starts normally.\n\n</details>\n\n# License\n\nThe code in this repository is released under the [Apache-2.0](LICENSE) license, while use of the ChatGLM-6B model weights must follow the [Model License](MODEL_LICENSE).\n"
  },
  {
    "path": "app.py",
    "content": "import gradio as gr\nfrom utils_env import collect_env\n\n# 收集环境信息\nprint('Collect environment info'.center(64, '-'))\nfor name, val in collect_env().items():\n    print(f'{name}: {val}')\nprint('Done'.center(64, '-'))\n\n# 加载模型\nmodel_name = 'THUDM/glm-4-9b-chat-1m'\nint4 = True\n\nif 'glm-4' in model_name.lower():\n    from predictors.glm4_predictor import GLM4\n    predictor = GLM4(model_name, int4=int4)\nelif 'chatglm3' in model_name.lower():\n    from predictors.chatglm3_predictor import ChatGLM3\n    predictor = ChatGLM3(model_name)\nelif 'chatglm2' in model_name.lower():\n    from predictors.chatglm2_predictor import ChatGLM2\n    predictor = ChatGLM2(model_name)\nelif 'chatglm' in model_name.lower():\n    from predictors.chatglm_predictor import ChatGLM\n    predictor = ChatGLM(model_name)\nelif 'gptq' in model_name.lower():\n    from predictors.llama_gptq import LLaMaGPTQ\n    predictor = LLaMaGPTQ(model_name)\nelif 'llama' in model_name.lower():\n    from predictors.llama import LLaMa\n    predictor = LLaMa(model_name)\nelif 'debug' in model_name.lower():\n    from predictors.debug import Debug\n    predictor = Debug(model_name)\nelse:\n    from predictors.chatglm_predictor import ChatGLM\n    predictor = ChatGLM(model_name)\n\n\ndef revise(history, latest_message):\n    if isinstance(history[-1], tuple):\n        history[-1] = (history[-1][0], latest_message)\n    elif isinstance(history[-1], dict):\n        history[-1]['content'] = latest_message\n    return history, ''\n\n\ndef revoke(history, last_state):\n    if len(history) >= 1:\n        history.pop()\n    last_state[0] = history\n    last_state[1] = ''\n    last_state[2] = ''\n    return history\n\n\ndef interrupt(allow_generate):\n    allow_generate[0] = False\n\n\ndef regenerate(last_state, max_length, top_p, temperature, allow_generate):\n    history, query, continue_message = last_state\n    if len(query) == 0:\n        print(\"Please input a query first.\")\n        return\n    for x in predictor.predict_continue(query, continue_message, max_length,\n                                        top_p, temperature, allow_generate,\n                                        history, last_state):\n        yield x\n\n\n# 搭建 UI 界面\nwith gr.Blocks(css=\"\"\".message {\n    width: inherit !important;\n    padding-left: 20px !important;\n}\"\"\") as demo:\n    gr.Markdown(f\"\"\"\n# 💡Creative ChatGLM WebUI\n\n👋 欢迎来到 ChatGLM 创意世界！[https://github.com/ypwhs/CreativeChatGLM](https://github.com/ypwhs/CreativeChatGLM)\n\n当前模型：{model_name}\n\n* 📖 你可以使用“续写”按钮帮 ChatGLM 想一个开头，并让它继续生成更多的内容。\n* 📝 你可以使用“修订”按钮修改最后一句 ChatGLM 的回复。\n\"\"\")\n    with gr.Row():\n        with gr.Column(scale=4):\n            chatbot = gr.Chatbot(\n                elem_id=\"chat-box\", show_label=False, height=850)\n        with gr.Column(scale=1):\n            with gr.Row():\n                max_length = gr.Slider(\n                    32,\n                    4096,\n                    value=2048,\n                    step=1.0,\n                    label=\"Maximum length\",\n                    interactive=True)\n                top_p = gr.Slider(\n                    0.01,\n                    1,\n                    value=0.7,\n                    step=0.01,\n                    label=\"Top P\",\n                    interactive=True)\n                temperature = gr.Slider(\n                    0.01,\n                    5,\n                    value=0.95,\n                    step=0.01,\n                    label=\"Temperature\",\n                
                    interactive=True)\n            with gr.Row():\n                query = gr.Textbox(\n                    show_label=False, placeholder=\"Prompts\", lines=4)\n                generate_button = gr.Button(\"Generate\")\n            with gr.Row():\n                continue_message = gr.Textbox(\n                    show_label=False, placeholder=\"Continue message\", lines=2)\n                continue_btn = gr.Button(\"Continue\")\n                revise_message = gr.Textbox(\n                    show_label=False, placeholder=\"Revise message\", lines=2)\n                revise_btn = gr.Button(\"Revise\")\n                revoke_btn = gr.Button(\"Revoke\")\n                regenerate_btn = gr.Button(\"Regenerate\")\n                interrupt_btn = gr.Button(\"Stop generating\")\n\n    history = gr.State([])\n    allow_generate = gr.State([True])\n    blank_input = gr.State(\"\")\n    last_state = gr.State([[], '', ''])  # history, query, continue_message\n    generate_button.click(\n        predictor.predict_continue,\n        inputs=[\n            query, blank_input, max_length, top_p, temperature, allow_generate,\n            history, last_state\n        ],\n        outputs=[chatbot, query])\n    revise_btn.click(\n        revise,\n        inputs=[history, revise_message],\n        outputs=[chatbot, revise_message])\n    revoke_btn.click(revoke, inputs=[history, last_state], outputs=[chatbot])\n    continue_btn.click(\n        predictor.predict_continue,\n        inputs=[\n            query, continue_message, max_length, top_p, temperature,\n            allow_generate, history, last_state\n        ],\n        outputs=[chatbot, query, continue_message])\n    regenerate_btn.click(\n        regenerate,\n        inputs=[last_state, max_length, top_p, temperature, allow_generate],\n        outputs=[chatbot, query, continue_message])\n    interrupt_btn.click(interrupt, inputs=[allow_generate])\n\ndemo.queue().launch(\n    server_name='0.0.0.0', server_port=7860, share=False, inbrowser=False)\ndemo.close()\n"
  },
  {
    "path": "app_fastapi.py",
    "content": "from utils_env import collect_env\nfrom fastapi import FastAPI\nfrom fastapi.responses import StreamingResponse\nfrom fastapi.middleware.cors import CORSMiddleware\nimport uvicorn\nimport argparse\nimport logging\nimport os\nimport json\nimport sys\n\n# 加载模型\n# model_name = 'THUDM/chatglm-6b'\nmodel_name = 'THUDM/chatglm3-6b'\n\nif 'chatglm' in model_name.lower():\n    from predictors.chatglm_predictor import ChatGLM\n    predictor = ChatGLM(model_name)\nelif 'gptq' in model_name.lower():\n    from predictors.llama_gptq import LLaMaGPTQ\n    predictor = LLaMaGPTQ(model_name)\nelif 'llama' in model_name.lower():\n    from predictors.llama import LLaMa\n    predictor = LLaMa(model_name)\nelif 'debug' in model_name.lower():\n    from predictors.debug import Debug\n    predictor = Debug(model_name)\nelse:\n    from predictors.chatglm_predictor import ChatGLM\n    predictor = ChatGLM(model_name)\n\n\n# 接入log\ndef getLogger(name, file_name, use_formatter=True):\n    logger = logging.getLogger(name)\n    logger.setLevel(logging.INFO)\n    console_handler = logging.StreamHandler(sys.stdout)\n    formatter = logging.Formatter('%(asctime)s    %(message)s')\n    console_handler.setFormatter(formatter)\n    console_handler.setLevel(logging.INFO)\n    logger.addHandler(console_handler)\n    if file_name:\n        handler = logging.FileHandler(file_name, encoding='utf8')\n        handler.setLevel(logging.INFO)\n        if use_formatter:\n            formatter = logging.Formatter(\n                '%(asctime)s - %(name)s - %(message)s')\n            handler.setFormatter(formatter)\n        logger.addHandler(handler)\n    return logger\n\n\nlogger = getLogger('ChatGLM', 'chatlog.log')\n\n\n\n\n# 接入FastAPI\ndef start_server(quantize_level, http_address: str, port: int, gpu_id: str):\n    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\n    os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id\n\n    bot = predictor\n\n    app = FastAPI()\n    app.add_middleware(\n        CORSMiddleware,\n        allow_origins=[\"*\"],\n        allow_credentials=True,\n        allow_methods=[\"*\"],\n        allow_headers=[\"*\"])\n\n    allow_generate = [True]\n\n    @app.get(\"/\")\n    def index():\n        return {'message': 'started', 'success': True}\n\n    @app.post(\"/stream\")\n    def continue_question_stream(arg_dict: dict):\n\n        def decorate(generator):\n            for item in generator:\n                yield f\"data: {json.dumps(item, ensure_ascii=False)}\\n\\n\"\n\n        # inputs = [query, answer_prefix, max_length, top_p, temperature, allow_generate, history]\n        try:\n            query = arg_dict[\"query\"]\n            answer_prefix = arg_dict.get(\"answer_prefix\", \"\")\n            max_length = arg_dict.get(\"max_length\", 256)\n            top_p = arg_dict.get(\"top_p\", 0.7)\n            temperature = arg_dict.get(\"temperature\", 1.0)\n            allow_generate = arg_dict.get(\"allow_generate\", [True])\n            history = arg_dict.get(\"history\", [])\n            logger.info(\"Query - {}\".format(query))\n            if answer_prefix:\n                logger.info(f\"answer_prefix - {answer_prefix}\")\n            history = history[-MAX_HISTORY:]\n            if len(history) > 0:\n                logger.info(\"History - {}\".format(history))\n            \n            history = [tuple(h) for h in history]\n            inputs = [\n                query, answer_prefix, max_length, top_p, temperature,\n                allow_generate, history\n            ]\n            return 
    @app.post(\"/interrupt\")\n    def interrupt():\n        allow_generate[0] = False\n        logger.info(\"Interrupted.\")\n        return {\"message\": \"OK\"}\n\n    logger.info(\"starting server...\")\n    uvicorn.run(app=app, host=http_address, port=port)\n\n\nif __name__ == '__main__':\n    # Hyperparameter: how many history turns are kept as context for a reply\n    MAX_HISTORY = 5\n\n    parser = argparse.ArgumentParser(\n        description='Stream API Service for ChatGLM-6B')\n    parser.add_argument(\n        '--device',\n        '-d',\n        help='device, -1 means cpu, other means gpu ids',\n        default='0')\n    parser.add_argument(\n        '--quantize',\n        '-q',\n        help='level of quantize, option: 16, 8 or 4',\n        default=16)\n    parser.add_argument(\n        '--host', '-H', help='host to listen', default='0.0.0.0')\n    parser.add_argument(\n        '--port', '-P', help='port of this service', default=8000)\n    args = parser.parse_args()\n    start_server(args.quantize, args.host, int(args.port), args.device)"
  },
  {
    "path": "chatglm/configuration_chatglm.py",
    "content": "\"\"\" ChatGLM model configuration \"\"\"\n\nfrom transformers.configuration_utils import PretrainedConfig\nfrom transformers.utils import logging\n\nlogger = logging.get_logger(__name__)\n\n\nclass ChatGLMConfig(PretrainedConfig):\n    r\"\"\"\n    This is the configuration class to store the configuration of a [`~ChatGLMModel`].\n    It is used to instantiate an ChatGLM model according to the specified arguments, defining the model\n    architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of\n    the ChatGLM-6B [THUDM/ChatGLM-6B](https://huggingface.co/THUDM/chatglm-6b) architecture.\n\n    Configuration objects inherit from  [`PretrainedConfig`] and can be used\n    to control the model outputs. Read the documentation from  [`PretrainedConfig`]\n    for more information.\n\n\n    Args:\n        vocab_size (`int`, *optional*, defaults to 150528):\n            Vocabulary size of the ChatGLM-6B model. Defines the number of different tokens that can be represented by the\n            `inputs_ids` passed when calling [`~ChatGLMModel`] or\n            [`~TFChatGLMModel`].\n        hidden_size (`int`, *optional*, defaults to 4096):\n            Dimension of the encoder layers and the pooler layer.\n        num_hidden_layers (`int`, *optional*, defaults to 28):\n            Number of hidden layers in the Transformer encoder.\n        num_attention_heads (`int`, *optional*, defaults to 32):\n            Number of attention heads for each attention layer in the Transformer encoder.\n        inner_hidden_size (`int`, *optional*, defaults to 16384):\n            Dimension of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n        max_sequence_length (`int`, *optional*, defaults to 512):\n            The maximum sequence length that this model might ever be used with.\n            Typically set this to something large just in case (e.g., 512 or 1024 or 2048).\n        layernorm_epsilon (`float`, *optional*, defaults to 1e-5):\n            The epsilon used by the layer normalization layers.\n        use_cache (`bool`, *optional*, defaults to `True`):\n            Whether the model should return the last key/values attentions (not used by all models).\n        Example:\n\n    ```python\n    >>> from configuration_chatglm import ChatGLMConfig\n    >>> from modeling_chatglm import ChatGLMModel\n\n    >>> # Initializing a ChatGLM-6B THUDM/ChatGLM-6B style configuration\n    >>> configuration = ChatGLMConfig()\n\n    >>> # Initializing a model from the THUDM/ChatGLM-6B style configuration\n    >>> model = ChatGLMModel(configuration)\n\n    >>> # Accessing the model configuration\n    >>> configuration = model.config\n    ```\n\"\"\"\n    model_type = \"chatglm\"\n\n    def __init__(\n            self,\n            vocab_size=150528,\n            hidden_size=4096,\n            num_layers=28,\n            num_attention_heads=32,\n            layernorm_epsilon=1e-5,\n            use_cache=False,\n            bos_token_id=150004,\n            eos_token_id=150005,\n            mask_token_id=150000,\n            gmask_token_id=150001,\n            pad_token_id=0,\n            max_sequence_length=2048,\n            inner_hidden_size=16384,\n            position_encoding_2d=True,\n            quantization_bit=0,\n            pre_seq_len=None,\n            prefix_projection=False,\n            **kwargs\n    ):\n        self.num_layers = num_layers\n        self.vocab_size = vocab_size\n        self.hidden_size = 
hidden_size\n        self.num_attention_heads = num_attention_heads\n        self.max_sequence_length = max_sequence_length\n        self.layernorm_epsilon = layernorm_epsilon\n        self.inner_hidden_size = inner_hidden_size\n        self.use_cache = use_cache\n        self.bos_token_id = bos_token_id\n        self.eos_token_id = eos_token_id\n        self.pad_token_id = pad_token_id\n        self.mask_token_id = mask_token_id\n        self.gmask_token_id = gmask_token_id\n        self.position_encoding_2d = position_encoding_2d\n        self.quantization_bit = quantization_bit\n        self.pre_seq_len = pre_seq_len\n        self.prefix_projection = prefix_projection\n\n        super().__init__(\n            pad_token_id=pad_token_id,\n            bos_token_id=bos_token_id,\n            eos_token_id=eos_token_id,\n            **kwargs\n        )\n"
  },
  {
    "path": "chatglm/modeling_chatglm.py",
    "content": "\"\"\" PyTorch ChatGLM model. \"\"\"\n\nimport math\nimport copy\nimport os\nimport warnings\nimport re\nimport sys\n\nimport torch\nimport torch.utils.checkpoint\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss, LayerNorm\nfrom torch.nn.utils import skip_init\nfrom typing import Optional, Tuple, Union, List, Callable, Dict, Any\n\nfrom transformers.utils import (\n    add_code_sample_docstrings,\n    add_start_docstrings,\n    add_start_docstrings_to_model_forward,\n)\nfrom transformers.modeling_outputs import (\n    BaseModelOutputWithPast,\n    CausalLMOutputWithPast,\n    BaseModelOutputWithPastAndCrossAttentions,\n)\nfrom transformers.modeling_utils import PreTrainedModel\nfrom transformers.utils import logging\nfrom transformers.generation.logits_process import LogitsProcessor\nfrom transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig, ModelOutput\n\nfrom .configuration_chatglm import ChatGLMConfig\n\n# flags required to enable jit fusion kernels\n\nif sys.platform != 'darwin':\n    torch._C._jit_set_profiling_mode(False)\n    torch._C._jit_set_profiling_executor(False)\n    torch._C._jit_override_can_fuse_on_cpu(True)\n    torch._C._jit_override_can_fuse_on_gpu(True)\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"THUDM/ChatGLM-6B\"\n_CONFIG_FOR_DOC = \"ChatGLM6BConfig\"\n\nCHATGLM_6B_PRETRAINED_MODEL_ARCHIVE_LIST = [\n    \"THUDM/chatglm-6b\",\n    # See all ChatGLM-6B models at https://huggingface.co/models?filter=chatglm\n]\n\n\nclass InvalidScoreLogitsProcessor(LogitsProcessor):\n    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:\n        if torch.isnan(scores).any() or torch.isinf(scores).any():\n            scores.zero_()\n            scores[..., 5] = 5e4\n        return scores\n\n\ndef load_tf_weights_in_chatglm_6b(model, config, tf_checkpoint_path):\n    \"\"\"Load tf checkpoints in a pytorch model.\"\"\"\n    try:\n        import re\n\n        import numpy as np\n        import tensorflow as tf\n    except ImportError:\n        logger.error(\n            \"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. 
Please see \"\n            \"https://www.tensorflow.org/install/ for installation instructions.\"\n        )\n        raise\n    tf_path = os.path.abspath(tf_checkpoint_path)\n    logger.info(f\"Converting TensorFlow checkpoint from {tf_path}\")\n    # Load weights from TF model\n    init_vars = tf.train.list_variables(tf_path)\n    names = []\n    arrays = []\n    for name, shape in init_vars:\n        logger.info(f\"Loading TF weight {name} with shape {shape}\")\n        array = tf.train.load_variable(tf_path, name)\n        names.append(name)\n        arrays.append(array)\n\n    for name, array in zip(names, arrays):\n        name = name.split(\"/\")\n        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v\n        # which are not required for using pretrained model\n        if any(\n                n in [\"adam_v\", \"adam_m\", \"AdamWeightDecayOptimizer\", \"AdamWeightDecayOptimizer_1\", \"global_step\"]\n                for n in name\n        ):\n            logger.info(f\"Skipping {'/'.join(name)}\")\n            continue\n        pointer = model\n        for m_name in name:\n            if re.fullmatch(r\"[A-Za-z]+_\\d+\", m_name):\n                scope_names = re.split(r\"_(\\d+)\", m_name)\n            else:\n                scope_names = [m_name]\n            if scope_names[0] == \"kernel\" or scope_names[0] == \"gamma\":\n                pointer = getattr(pointer, \"weight\")\n            elif scope_names[0] == \"output_bias\" or scope_names[0] == \"beta\":\n                pointer = getattr(pointer, \"bias\")\n            elif scope_names[0] == \"output_weights\":\n                pointer = getattr(pointer, \"weight\")\n            elif scope_names[0] == \"squad\":\n                pointer = getattr(pointer, \"classifier\")\n            else:\n                try:\n                    pointer = getattr(pointer, scope_names[0])\n                except AttributeError:\n                    logger.info(f\"Skipping {'/'.join(name)}\")\n                    continue\n            if len(scope_names) >= 2:\n                num = int(scope_names[1])\n                pointer = pointer[num]\n        if m_name[-11:] == \"_embeddings\":\n            pointer = getattr(pointer, \"weight\")\n        elif m_name == \"kernel\":\n            array = np.transpose(array)\n        try:\n            assert (\n                    pointer.shape == array.shape\n            ), f\"Pointer shape {pointer.shape} and array shape {array.shape} mismatched\"\n        except AssertionError as e:\n            e.args += (pointer.shape, array.shape)\n            raise\n        logger.info(f\"Initialize PyTorch weight {name}\")\n        pointer.data = torch.from_numpy(array)\n    return model\n\n\nclass PrefixEncoder(torch.nn.Module):\n    \"\"\"\n    The torch.nn model to encode the prefix\n    Input shape: (batch-size, prefix-length)\n    Output shape: (batch-size, prefix-length, 2*layers*hidden)\n    \"\"\"\n\n    def __init__(self, config):\n        super().__init__()\n        self.prefix_projection = config.prefix_projection\n        if self.prefix_projection:\n            # Use a two-layer MLP to encode the prefix\n            self.embedding = torch.nn.Embedding(config.pre_seq_len, config.hidden_size)\n            self.trans = torch.nn.Sequential(\n                torch.nn.Linear(config.hidden_size, config.hidden_size),\n                torch.nn.Tanh(),\n                torch.nn.Linear(config.hidden_size, config.num_layers * config.hidden_size * 2)\n            )\n     
   else:\n            self.embedding = torch.nn.Embedding(config.pre_seq_len, config.num_layers * config.hidden_size * 2)\n\n    def forward(self, prefix: torch.Tensor):\n        if self.prefix_projection:\n            prefix_tokens = self.embedding(prefix)\n            past_key_values = self.trans(prefix_tokens)\n        else:\n            past_key_values = self.embedding(prefix)\n        return past_key_values\n\n\n@torch.jit.script\ndef gelu_impl(x):\n    \"\"\"OpenAI's gelu implementation.\"\"\"\n    return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x *\n                                       (1.0 + 0.044715 * x * x)))\n\n\ndef gelu(x):\n    return gelu_impl(x)\n\n\nclass RotaryEmbedding(torch.nn.Module):\n    def __init__(self, dim, base=10000, precision=torch.half, learnable=False):\n        super().__init__()\n        inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim))\n        if precision == torch.half:\n            inv_freq = inv_freq.half()\n        self.learnable = learnable\n        if learnable:\n            self.inv_freq = torch.nn.Parameter(inv_freq)\n            self.max_seq_len_cached = None\n        else:\n            self.register_buffer('inv_freq', inv_freq)\n            self.max_seq_len_cached = None\n            self.cos_cached = None\n            self.sin_cached = None\n        self.precision = precision\n\n    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys,\n                              error_msgs):\n        pass\n\n    def forward(self, x, seq_dim=1, seq_len=None):\n        if seq_len is None:\n            seq_len = x.shape[seq_dim]\n        if self.max_seq_len_cached is None or (seq_len > self.max_seq_len_cached):\n            self.max_seq_len_cached = None if self.learnable else seq_len\n            t = torch.arange(seq_len, device=x.device, dtype=self.inv_freq.dtype)\n            freqs = torch.einsum('i,j->ij', t, self.inv_freq)\n            # Different from paper, but it uses a different permutation in order to obtain the same calculation\n            emb = torch.cat((freqs, freqs), dim=-1).to(x.device)\n            if self.precision == torch.bfloat16:\n                emb = emb.float()\n\n            # [sx, 1 (b * np), hn]\n            cos_cached = emb.cos()[:, None, :]\n            sin_cached = emb.sin()[:, None, :]\n            if self.precision == torch.bfloat16:\n                cos_cached = cos_cached.bfloat16()\n                sin_cached = sin_cached.bfloat16()\n            if self.learnable:\n                return cos_cached, sin_cached\n            self.cos_cached, self.sin_cached = cos_cached, sin_cached\n        return self.cos_cached[:seq_len, ...], self.sin_cached[:seq_len, ...]\n\n    def _apply(self, fn):\n        if self.cos_cached is not None:\n            self.cos_cached = fn(self.cos_cached)\n        if self.sin_cached is not None:\n            self.sin_cached = fn(self.sin_cached)\n        return super()._apply(fn)\n\n\ndef rotate_half(x):\n    x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:]\n    return torch.cat((-x2, x1), dim=x1.ndim - 1)  # dim=-1 triggers a bug in earlier torch versions\n\n\n@torch.jit.script\ndef apply_rotary_pos_emb_index(q, k, cos, sin, position_id):\n    # position_id: [sq, b], q, k: [sq, b, np, hn], cos: [sq, 1, hn] -> [sq, b, 1, hn]\n    cos, sin = F.embedding(position_id, cos.squeeze(1)).unsqueeze(2), \\\n        F.embedding(position_id, sin.squeeze(1)).unsqueeze(2)\n    q, k = (q * cos) + (rotate_half(q) * sin), (k * 
cos) + (rotate_half(k) * sin)\n    return q, k\n\n\ndef attention_fn(\n        self,\n        query_layer,\n        key_layer,\n        value_layer,\n        attention_mask,\n        hidden_size_per_partition,\n        layer_id,\n        layer_past=None,\n        scaling_attention_score=True,\n        use_cache=False,\n):\n    if layer_past is not None:\n        past_key, past_value = layer_past[0], layer_past[1]\n        key_layer = torch.cat((past_key, key_layer), dim=0)\n        value_layer = torch.cat((past_value, value_layer), dim=0)\n\n    # seqlen, batch, num_attention_heads, hidden_size_per_attention_head\n    seq_len, b, nh, hidden_size = key_layer.shape\n\n    if use_cache:\n        present = (key_layer, value_layer)\n    else:\n        present = None\n\n    query_key_layer_scaling_coeff = float(layer_id + 1)\n    if scaling_attention_score:\n        query_layer = query_layer / (math.sqrt(hidden_size) * query_key_layer_scaling_coeff)\n\n    # ===================================\n    # Raw attention scores. [b, np, s, s]\n    # ===================================\n\n    # [b, np, sq, sk]\n    output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0))\n\n    # [sq, b, np, hn] -> [sq, b * np, hn]\n    query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1)\n    # [sk, b, np, hn] -> [sk, b * np, hn]\n    key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)\n\n    matmul_result = torch.zeros(\n        1, 1, 1,\n        dtype=query_layer.dtype,\n        device=query_layer.device,\n    )\n\n    matmul_result = torch.baddbmm(\n        matmul_result,\n        query_layer.transpose(0, 1),  # [b * np, sq, hn]\n        key_layer.transpose(0, 1).transpose(1, 2),  # [b * np, hn, sk]\n        beta=0.0,\n        alpha=1.0,\n    )\n\n    # change view to [b, np, sq, sk]\n    attention_scores = matmul_result.view(*output_size)\n\n    if self.scale_mask_softmax:\n        self.scale_mask_softmax.scale = query_key_layer_scaling_coeff\n        attention_probs = self.scale_mask_softmax(attention_scores, attention_mask.contiguous())\n    else:\n        if not (attention_mask == 0).all():\n            # if auto-regressive, skip\n            attention_scores.masked_fill_(attention_mask, -10000.0)\n        dtype = attention_scores.dtype\n        attention_scores = attention_scores.float()\n        attention_scores = attention_scores * query_key_layer_scaling_coeff\n\n        attention_probs = F.softmax(attention_scores, dim=-1)\n\n        attention_probs = attention_probs.type(dtype)\n\n    # =========================\n    # Context layer. 
[sq, b, hp]\n    # =========================\n\n    # value_layer -> context layer.\n    # [sk, b, np, hn] --> [b, np, sq, hn]\n\n    # context layer shape: [b, np, sq, hn]\n    output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3))\n\n    # change view [sk, b * np, hn]\n    value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1)\n\n    # change view [b * np, sq, sk]\n    attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)\n\n    # matmul: [b * np, sq, hn]\n    context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))\n\n    # change view [b, np, sq, hn]\n    context_layer = context_layer.view(*output_size)\n\n    # [b, np, sq, hn] --> [sq, b, np, hn]\n    context_layer = context_layer.permute(2, 0, 1, 3).contiguous()\n\n    # [sq, b, np, hn] --> [sq, b, hp]\n    new_context_layer_shape = context_layer.size()[:-2] + (hidden_size_per_partition,)\n    context_layer = context_layer.view(*new_context_layer_shape)\n\n    outputs = (context_layer, present, attention_probs)\n\n    return outputs\n\n\ndef default_init(cls, *args, **kwargs):\n    return cls(*args, **kwargs)\n\n\nclass SelfAttention(torch.nn.Module):\n    def __init__(self, hidden_size, num_attention_heads,\n                 layer_id, hidden_size_per_attention_head=None, bias=True,\n                 params_dtype=torch.float, position_encoding_2d=True, empty_init=True):\n        if empty_init:\n            init_method = skip_init\n        else:\n            init_method = default_init\n        super(SelfAttention, self).__init__()\n\n        self.layer_id = layer_id\n        self.hidden_size = hidden_size\n        self.hidden_size_per_partition = hidden_size\n        self.num_attention_heads = num_attention_heads\n        self.num_attention_heads_per_partition = num_attention_heads\n        self.position_encoding_2d = position_encoding_2d\n        self.rotary_emb = RotaryEmbedding(\n            self.hidden_size // (self.num_attention_heads * 2)\n            if position_encoding_2d\n            else self.hidden_size // self.num_attention_heads,\n            base=10000,\n            precision=torch.half,\n            learnable=False,\n        )\n\n        self.scale_mask_softmax = None\n\n        if hidden_size_per_attention_head is None:\n            self.hidden_size_per_attention_head = hidden_size // num_attention_heads\n        else:\n            self.hidden_size_per_attention_head = hidden_size_per_attention_head\n\n        self.inner_hidden_size = num_attention_heads * self.hidden_size_per_attention_head\n\n        # Strided linear layer.\n        self.query_key_value = init_method(\n            torch.nn.Linear,\n            hidden_size,\n            3 * self.inner_hidden_size,\n            bias=bias,\n            dtype=params_dtype,\n        )\n\n        self.dense = init_method(\n            torch.nn.Linear,\n            self.inner_hidden_size,\n            hidden_size,\n            bias=bias,\n            dtype=params_dtype,\n        )\n\n    @staticmethod\n    def attention_mask_func(attention_scores, attention_mask):\n        attention_scores.masked_fill_(attention_mask, -10000.0)\n        return attention_scores\n\n    def split_tensor_along_last_dim(self, tensor, num_partitions,\n                                    contiguous_split_chunks=False):\n        \"\"\"Split a tensor along its last dimension.\n        Arguments:\n            tensor: input tensor.\n            num_partitions: number of 
partitions to split the tensor\n            contiguous_split_chunks: If True, make each chunk contiguous\n                                    in memory.\n        \"\"\"\n        # Get the size and dimension.\n        last_dim = tensor.dim() - 1\n        last_dim_size = tensor.size()[last_dim] // num_partitions\n        # Split.\n        tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)\n        # Note: torch.split does not create contiguous tensors by default.\n        if contiguous_split_chunks:\n            return tuple(chunk.contiguous() for chunk in tensor_list)\n\n        return tensor_list\n\n    def forward(\n            self,\n            hidden_states: torch.Tensor,\n            position_ids,\n            attention_mask: torch.Tensor,\n            layer_id,\n            layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,\n            use_cache: bool = False,\n            output_attentions: bool = False,\n    ):\n        \"\"\"\n        hidden_states: [seq_len, batch, hidden_size]\n        attention_mask: [(1, 1), seq_len, seq_len]\n        \"\"\"\n\n        # [seq_len, batch, 3 * hidden_size]\n        mixed_raw_layer = self.query_key_value(hidden_states)\n\n        # [seq_len, batch, 3 * hidden_size] --> [seq_len, batch, num_attention_heads, 3 * hidden_size_per_attention_head]\n        new_tensor_shape = mixed_raw_layer.size()[:-1] + (\n            self.num_attention_heads_per_partition,\n            3 * self.hidden_size_per_attention_head,\n        )\n        mixed_raw_layer = mixed_raw_layer.view(*new_tensor_shape)\n\n        # [seq_len, batch, num_attention_heads, hidden_size_per_attention_head]\n        (query_layer, key_layer, value_layer) = self.split_tensor_along_last_dim(mixed_raw_layer, 3)\n\n        if self.position_encoding_2d:\n            q1, q2 = query_layer.chunk(2, dim=(query_layer.ndim - 1))\n            k1, k2 = key_layer.chunk(2, dim=(key_layer.ndim - 1))\n            cos, sin = self.rotary_emb(q1, seq_len=position_ids.max() + 1)\n            position_ids, block_position_ids = position_ids[:, 0, :].transpose(0, 1).contiguous(), \\\n                position_ids[:, 1, :].transpose(0, 1).contiguous()\n            q1, k1 = apply_rotary_pos_emb_index(q1, k1, cos, sin, position_ids)\n            q2, k2 = apply_rotary_pos_emb_index(q2, k2, cos, sin, block_position_ids)\n            query_layer = torch.concat([q1, q2], dim=(q1.ndim - 1))\n            key_layer = torch.concat([k1, k2], dim=(k1.ndim - 1))\n        else:\n            position_ids = position_ids.transpose(0, 1)\n            cos, sin = self.rotary_emb(value_layer, seq_len=position_ids.max() + 1)\n            # [seq_len, batch, num_attention_heads, hidden_size_per_attention_head]\n            query_layer, key_layer = apply_rotary_pos_emb_index(query_layer, key_layer, cos, sin, position_ids)\n\n        # [seq_len, batch, hidden_size]\n        context_layer, present, attention_probs = attention_fn(\n            self=self,\n            query_layer=query_layer,\n            key_layer=key_layer,\n            value_layer=value_layer,\n            attention_mask=attention_mask,\n            hidden_size_per_partition=self.hidden_size_per_partition,\n            layer_id=layer_id,\n            layer_past=layer_past,\n            use_cache=use_cache\n        )\n\n        output = self.dense(context_layer)\n\n        outputs = (output, present)\n\n        if output_attentions:\n            outputs += (attention_probs,)\n\n        return outputs  # output, present, attention_probs\n\n\nclass 
GEGLU(torch.nn.Module):\n    def __init__(self):\n        super().__init__()\n        self.activation_fn = F.gelu\n\n    def forward(self, x):\n        # dim=-1 breaks in jit for pt<1.10\n        x1, x2 = x.chunk(2, dim=(x.ndim - 1))\n        return x1 * self.activation_fn(x2)\n\n\nclass GLU(torch.nn.Module):\n    def __init__(self, hidden_size, inner_hidden_size=None,\n                 layer_id=None, bias=True, activation_func=gelu, params_dtype=torch.float, empty_init=True):\n        super(GLU, self).__init__()\n        if empty_init:\n            init_method = skip_init\n        else:\n            init_method = default_init\n        self.layer_id = layer_id\n        self.activation_func = activation_func\n\n        # Project to 4h.\n        self.hidden_size = hidden_size\n        if inner_hidden_size is None:\n            inner_hidden_size = 4 * hidden_size\n        self.inner_hidden_size = inner_hidden_size\n        self.dense_h_to_4h = init_method(\n            torch.nn.Linear,\n            self.hidden_size,\n            self.inner_hidden_size,\n            bias=bias,\n            dtype=params_dtype,\n        )\n        # Project back to h.\n        self.dense_4h_to_h = init_method(\n            torch.nn.Linear,\n            self.inner_hidden_size,\n            self.hidden_size,\n            bias=bias,\n            dtype=params_dtype,\n        )\n\n    def forward(self, hidden_states):\n        \"\"\"\n        hidden_states: [seq_len, batch, hidden_size]\n        \"\"\"\n\n        # [seq_len, batch, inner_hidden_size]\n        intermediate_parallel = self.dense_h_to_4h(hidden_states)\n\n        intermediate_parallel = self.activation_func(intermediate_parallel)\n\n        output = self.dense_4h_to_h(intermediate_parallel)\n\n        return output\n\n\nclass GLMBlock(torch.nn.Module):\n    def __init__(\n            self,\n            hidden_size,\n            num_attention_heads,\n            layernorm_epsilon,\n            layer_id,\n            inner_hidden_size=None,\n            hidden_size_per_attention_head=None,\n            layernorm=LayerNorm,\n            use_bias=True,\n            params_dtype=torch.float,\n            num_layers=28,\n            position_encoding_2d=True,\n            empty_init=True\n    ):\n        super(GLMBlock, self).__init__()\n        # Set output layer initialization if not provided.\n\n        self.layer_id = layer_id\n\n        # Layernorm on the input data.\n        self.input_layernorm = layernorm(hidden_size, eps=layernorm_epsilon)\n\n        self.position_encoding_2d = position_encoding_2d\n\n        # Self attention.\n        self.attention = SelfAttention(\n            hidden_size,\n            num_attention_heads,\n            layer_id,\n            hidden_size_per_attention_head=hidden_size_per_attention_head,\n            bias=use_bias,\n            params_dtype=params_dtype,\n            position_encoding_2d=self.position_encoding_2d,\n            empty_init=empty_init\n        )\n\n        # Layernorm on the attention output.\n        self.post_attention_layernorm = layernorm(hidden_size, eps=layernorm_epsilon)\n\n        self.num_layers = num_layers\n\n        # GLU\n        self.mlp = GLU(\n            hidden_size,\n            inner_hidden_size=inner_hidden_size,\n            bias=use_bias,\n            layer_id=layer_id,\n            params_dtype=params_dtype,\n            empty_init=empty_init\n        )\n\n    def forward(\n            self,\n            hidden_states: torch.Tensor,\n            position_ids,\n            
attention_mask: torch.Tensor,\n            layer_id,\n            layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,\n            use_cache: bool = False,\n            output_attentions: bool = False,\n    ):\n        \"\"\"\n        hidden_states: [seq_len, batch, hidden_size]\n        attention_mask: [(1, 1), seq_len, seq_len]\n        \"\"\"\n\n        # Layer norm at the beginning of the transformer layer.\n        # [seq_len, batch, hidden_size]\n        attention_input = self.input_layernorm(hidden_states)\n\n        # Self attention.\n        attention_outputs = self.attention(\n            attention_input,\n            position_ids,\n            attention_mask=attention_mask,\n            layer_id=layer_id,\n            layer_past=layer_past,\n            use_cache=use_cache,\n            output_attentions=output_attentions\n        )\n\n        attention_output = attention_outputs[0]\n\n        outputs = attention_outputs[1:]\n\n        # Residual connection.\n        alpha = (2 * self.num_layers) ** 0.5\n        hidden_states = attention_input * alpha + attention_output\n\n        mlp_input = self.post_attention_layernorm(hidden_states)\n\n        # MLP.\n        mlp_output = self.mlp(mlp_input)\n\n        # Second residual connection.\n        output = mlp_input * alpha + mlp_output\n\n        if use_cache:\n            outputs = (output,) + outputs\n        else:\n            outputs = (output,) + outputs[1:]\n\n        return outputs  # hidden_states, present, attentions\n\n\nclass ChatGLMPreTrainedModel(PreTrainedModel):\n    \"\"\"\n    An abstract class to handle weights initialization and\n    a simple interface for downloading and loading pretrained models.\n    \"\"\"\n\n    is_parallelizable = False\n    supports_gradient_checkpointing = True\n    config_class = ChatGLMConfig\n    base_model_prefix = \"transformer\"\n    _no_split_modules = [\"GLMBlock\"]\n\n    def __init__(self, *inputs, **kwargs):\n        super().__init__(*inputs, **kwargs)\n\n    def _init_weights(self, module: nn.Module):\n        \"\"\"Initialize the weights.\"\"\"\n        return\n\n    def get_masks(self, input_ids, device):\n        batch_size, seq_length = input_ids.shape\n        context_lengths = [seq.tolist().index(self.config.bos_token_id) for seq in input_ids]\n        attention_mask = torch.ones((batch_size, seq_length, seq_length), device=device)\n        attention_mask.tril_()\n        for i, context_length in enumerate(context_lengths):\n            attention_mask[i, :, :context_length] = 1\n        attention_mask.unsqueeze_(1)\n        attention_mask = (attention_mask < 0.5).bool()\n\n        return attention_mask\n\n    def get_position_ids(self, input_ids, mask_positions, device, use_gmasks=None):\n        batch_size, seq_length = input_ids.shape\n        if use_gmasks is None:\n            use_gmasks = [False] * batch_size\n        context_lengths = [seq.tolist().index(self.config.bos_token_id) for seq in input_ids]\n        if self.position_encoding_2d:\n            position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1)\n            for i, context_length in enumerate(context_lengths):\n                position_ids[i, context_length:] = mask_positions[i]\n            block_position_ids = [torch.cat((\n                torch.zeros(context_length, dtype=torch.long, device=device),\n                torch.arange(seq_length - context_length, dtype=torch.long, device=device) + 1\n            )) for context_length in 
context_lengths]\n            block_position_ids = torch.stack(block_position_ids, dim=0)\n            position_ids = torch.stack((position_ids, block_position_ids), dim=1)\n        else:\n            position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1)\n            for i, context_length in enumerate(context_lengths):\n                if not use_gmasks[i]:\n                    # Index batch row i explicitly; without it the assignment would overwrite every sequence.\n                    position_ids[i, context_length:] = mask_positions[i]\n\n        return position_ids\n\n    def _set_gradient_checkpointing(self, module, value=False):\n        if isinstance(module, ChatGLMModel):\n            module.gradient_checkpointing = value\n\n\nCHATGLM_6B_START_DOCSTRING = r\"\"\"\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class.\n    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general\n    usage and behavior.\n\n    Parameters:\n        config ([`~ChatGLMConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the configuration.\n            Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nCHATGLM_6B_INPUTS_DOCSTRING = r\"\"\"\n    Args:\n        input_ids (`torch.LongTensor` of shape `({0})`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`ChatGLMTokenizer`].\n            See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            [What are attention masks?](../glossary#attention-mask)\n        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`:\n\n            - 0 corresponds to a *sentence A* token,\n            - 1 corresponds to a *sentence B* token.\n\n            [What are token type IDs?](../glossary#token-type-ids)\n        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n            Indices of positions of each input sequence token in the position embeddings.\n            Selected in the range `[0, config.max_position_embeddings - 1]`.\n\n            [What are position IDs?](../glossary#position-ids)\n        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n            Mask to nullify selected heads of the self-attention modules. 
Mask values selected in `[0, 1]`:\n\n            - 1 indicates the head is **not masked**,\n            - 0 indicates the head is **masked**.\n\n        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\n            This is useful if you want more control over how to convert *input_ids* indices into associated vectors\n            than the model's internal embedding lookup matrix.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n    \"The bare ChatGLM-6B Model transformer outputting raw hidden-states without any specific head on top.\",\n    CHATGLM_6B_START_DOCSTRING,\n)\nclass ChatGLMModel(ChatGLMPreTrainedModel):\n    \"\"\"\n\n    The model can behave as an encoder (with only self-attention) as well\n    as a decoder, in which case a layer of cross-attention is added between\n    the self-attention layers, following the architecture described in [Attention is\n    all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani,\n    Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.\n\n    To behave as a decoder, the model needs to be initialized with the\n    `is_decoder` argument of the configuration set to `True`.\n    To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder`\n    argument and `add_cross_attention` set to `True`; an\n    `encoder_hidden_states` is then expected as an input to the forward pass.\n    \"\"\"\n\n    def __init__(self, config: ChatGLMConfig, empty_init=True):\n        super().__init__(config)\n        if empty_init:\n            init_method = skip_init\n        else:\n            init_method = default_init\n        # recording parameters\n        self.max_sequence_length = config.max_sequence_length\n        self.hidden_size = config.hidden_size\n        self.params_dtype = torch.half\n        self.num_attention_heads = config.num_attention_heads\n        self.vocab_size = config.vocab_size\n        self.num_layers = config.num_layers\n        self.layernorm_epsilon = config.layernorm_epsilon\n        self.inner_hidden_size = config.inner_hidden_size\n        self.hidden_size_per_attention_head = self.hidden_size // self.num_attention_heads\n        self.position_encoding_2d = config.position_encoding_2d\n        self.pre_seq_len = config.pre_seq_len\n        self.prefix_projection = config.prefix_projection\n\n        self.word_embeddings = init_method(\n            torch.nn.Embedding,\n            num_embeddings=self.vocab_size, embedding_dim=self.hidden_size,\n            dtype=self.params_dtype\n        )\n        self.gradient_checkpointing = False\n\n        def get_layer(layer_id):\n            return GLMBlock(\n                self.hidden_size,\n                self.num_attention_heads,\n                self.layernorm_epsilon,\n                layer_id,\n                
inner_hidden_size=self.inner_hidden_size,\n                hidden_size_per_attention_head=self.hidden_size_per_attention_head,\n                layernorm=LayerNorm,\n                use_bias=True,\n                params_dtype=self.params_dtype,\n                position_encoding_2d=self.position_encoding_2d,\n                empty_init=empty_init\n            )\n\n        self.layers = torch.nn.ModuleList(\n            [get_layer(layer_id) for layer_id in range(self.num_layers)]\n        )\n\n        # Final layer norm before output.\n        self.final_layernorm = LayerNorm(self.hidden_size, eps=self.layernorm_epsilon)\n\n        if self.pre_seq_len is not None:\n            for param in self.parameters():\n                param.requires_grad = False\n            self.prefix_tokens = torch.arange(self.pre_seq_len).long()\n            self.prefix_encoder = PrefixEncoder(config)\n            self.dropout = torch.nn.Dropout(0.1)\n\n            # total_params = sum(p.numel() for p in self.parameters())\n            # trainable_params = sum(p.numel() for p in self.parameters() if p.requires_grad)\n            # print(\"Using p-tuning v2: # trainable_params = {} / {}\".format(trainable_params, total_params))\n\n    def get_input_embeddings(self):\n        return self.word_embeddings\n\n    def set_input_embeddings(self, new_embeddings: torch.Tensor):\n        self.word_embeddings = new_embeddings\n\n    def get_prompt(self, batch_size, device, dtype=torch.half):\n        prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(device)\n        past_key_values = self.prefix_encoder(prefix_tokens).type(dtype)\n        past_key_values = past_key_values.view(\n            batch_size,\n            self.pre_seq_len,\n            self.num_layers * 2,\n            self.num_attention_heads,\n            self.hidden_size // self.num_attention_heads\n        )\n        # seq_len, b, nh, hidden_size\n        past_key_values = self.dropout(past_key_values)\n        past_key_values = past_key_values.permute([2, 1, 0, 3, 4]).split(2)\n        # past_key_values = [(v[0], v[1]) for v in past_key_values]\n        return past_key_values\n\n    @add_start_docstrings_to_model_forward(CHATGLM_6B_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n    @add_code_sample_docstrings(\n        checkpoint=_CHECKPOINT_FOR_DOC,\n        output_type=BaseModelOutputWithPastAndCrossAttentions,\n        config_class=_CONFIG_FOR_DOC,\n    )\n    def forward(\n            self,\n            input_ids: Optional[torch.LongTensor] = None,\n            position_ids: Optional[torch.LongTensor] = None,\n            attention_mask: Optional[torch.Tensor] = None,\n            past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,\n            inputs_embeds: Optional[torch.LongTensor] = None,\n            use_cache: Optional[bool] = None,\n            output_attentions: Optional[bool] = None,\n            output_hidden_states: Optional[bool] = None,\n            return_dict: Optional[bool] = None,\n    ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPast]:\n\n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = (\n            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        )\n        use_cache = use_cache if use_cache is not None else self.config.use_cache\n        return_dict = return_dict if return_dict is not None else 
self.config.use_return_dict\n\n        if self.gradient_checkpointing and self.training:\n            if use_cache:\n                logger.warning_once(\n                    \"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...\"\n                )\n                use_cache = False\n\n        if input_ids is not None and inputs_embeds is not None:\n            raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n        elif input_ids is not None:\n            batch_size, seq_length = input_ids.shape[:2]\n        elif inputs_embeds is not None:\n            batch_size, seq_length = inputs_embeds.shape[:2]\n        else:\n            raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n        if inputs_embeds is None:\n            inputs_embeds = self.word_embeddings(input_ids)\n\n        if past_key_values is None:\n            if self.pre_seq_len is not None:\n                past_key_values = self.get_prompt(batch_size=input_ids.shape[0], device=input_ids.device,\n                                                  dtype=inputs_embeds.dtype)\n            else:\n                past_key_values = tuple([None] * len(self.layers))\n\n            if attention_mask is None:\n                attention_mask = self.get_masks(\n                    input_ids,\n                    device=input_ids.device\n                )\n\n\n            if position_ids is None:\n                MASK, gMASK = self.config.mask_token_id, self.config.gmask_token_id\n                seqs = input_ids.tolist()\n\n                mask_positions, use_gmasks = [], []\n                for seq in seqs:\n                    mask_token = gMASK if gMASK in seq else MASK\n                    use_gmask = mask_token == gMASK\n                    mask_positions.append(seq.index(mask_token))\n                    use_gmasks.append(use_gmask)\n\n                position_ids = self.get_position_ids(\n                    input_ids,\n                    mask_positions=mask_positions,\n                    device=input_ids.device,\n                    use_gmasks=use_gmasks\n                )\n\n        if self.pre_seq_len is not None and attention_mask is not None:\n            prefix_attention_mask = torch.ones(batch_size, 1, input_ids.size(-1), self.pre_seq_len).to(\n                attention_mask.device)\n            prefix_attention_mask = (prefix_attention_mask < 0.5).bool()\n            attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=3)\n\n        # [seq_len, batch, hidden_size]\n        hidden_states = inputs_embeds.transpose(0, 1)\n\n        presents = () if use_cache else None\n        all_self_attentions = () if output_attentions else None\n        all_hidden_states = () if output_hidden_states else None\n\n        if attention_mask is None:\n            attention_mask = torch.zeros(1, 1, device=input_ids.device).bool()\n        else:\n            attention_mask = attention_mask.to(hidden_states.device)\n\n        for i, layer in enumerate(self.layers):\n\n            if output_hidden_states:\n                all_hidden_states = all_hidden_states + (hidden_states,)\n            layer_past = past_key_values[i]\n\n            if self.gradient_checkpointing and self.training:\n                layer_ret = torch.utils.checkpoint.checkpoint(\n                    layer,\n                    hidden_states,\n                    position_ids,\n                    attention_mask,\n                    
torch.tensor(i),\n                    layer_past,\n                    use_cache,\n                    output_attentions\n                )\n            else:\n                layer_ret = layer(\n                    hidden_states,\n                    position_ids=position_ids,\n                    attention_mask=attention_mask,\n                    layer_id=torch.tensor(i),\n                    layer_past=layer_past,\n                    use_cache=use_cache,\n                    output_attentions=output_attentions\n                )\n\n            hidden_states = layer_ret[0]\n\n            if use_cache:\n                presents = presents + (layer_ret[1],)\n\n            if output_attentions:\n                all_self_attentions = all_self_attentions + (layer_ret[2 if use_cache else 1],)\n\n        # Final layer norm.\n        hidden_states = self.final_layernorm(hidden_states)\n\n        if output_hidden_states:\n            all_hidden_states = all_hidden_states + (hidden_states,)\n\n        if not return_dict:\n            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)\n\n        return BaseModelOutputWithPast(\n            last_hidden_state=hidden_states,\n            past_key_values=presents,\n            hidden_states=all_hidden_states,\n            attentions=all_self_attentions,\n        )\n\n\nclass ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):\n    def __init__(self, config: ChatGLMConfig, empty_init=True):\n        super().__init__(config)\n        if empty_init:\n            init_method = skip_init\n        else:\n            init_method = default_init\n\n        # self.hidden_size = config.hidden_size\n        # self.params_dtype = torch.half\n        # self.vocab_size = config.vocab_size\n        self.max_sequence_length = config.max_sequence_length\n\n        self.position_encoding_2d = config.position_encoding_2d\n\n        self.transformer = ChatGLMModel(config, empty_init=empty_init)\n\n        self.lm_head = init_method(\n            nn.Linear,\n            config.hidden_size,\n            config.vocab_size,\n            bias=False,\n            dtype=torch.half\n        )\n\n        self.config = config\n\n        self.quantized = False\n\n        if self.config.quantization_bit:\n            self.quantize(self.config.quantization_bit, empty_init=True)\n\n    def get_output_embeddings(self):\n        return self.lm_head\n\n    def set_output_embeddings(self, new_embeddings):\n        self.lm_head = new_embeddings\n\n    def _update_model_kwargs_for_generation(\n        self,\n        outputs: ModelOutput,\n        model_kwargs: Dict[str, Any],\n        is_encoder_decoder: bool = False,\n        standardize_cache_format: bool = False,\n    ) -> Dict[str, Any]:\n        # update past_key_values\n        model_kwargs[\"past_key_values\"] = self._extract_past_from_model_output(\n            outputs, standardize_cache_format=standardize_cache_format\n        )\n\n        # update attention mask\n        if \"attention_mask\" in model_kwargs:\n            attention_mask = model_kwargs[\"attention_mask\"]\n            if attention_mask is not None and attention_mask.dtype == torch.bool:\n                attention_mask = torch.cat(\n                    [attention_mask, attention_mask.new_ones((*attention_mask.shape[:3], 1))], dim=3)\n                new_attention_mask = attention_mask[:, :, -1:].clone()\n                new_attention_mask[..., -1] = False\n                model_kwargs[\"attention_mask\"] 
= torch.cat(\n                    [attention_mask, new_attention_mask], dim=2\n                )\n\n        # update position ids\n        if \"position_ids\" in model_kwargs:\n            position_ids = model_kwargs[\"position_ids\"]\n            new_position_id = position_ids[..., -1:].clone()\n            new_position_id[:, 1, :] += 1\n            model_kwargs[\"position_ids\"] = torch.cat(\n                [position_ids, new_position_id], dim=-1\n            )\n\n        return model_kwargs\n\n    def prepare_inputs_for_generation(\n            self,\n            input_ids: torch.LongTensor,\n            past: Optional[torch.Tensor] = None,\n            past_key_values: Optional[torch.Tensor] = None,\n            attention_mask: Optional[torch.Tensor] = None,\n            position_ids: Optional[torch.Tensor] = None,\n            **kwargs\n    ) -> dict:\n        batch_size, seq_length = input_ids.shape\n        MASK, gMASK = self.config.mask_token_id, self.config.gmask_token_id\n        seqs = input_ids.tolist()\n        mask_positions, use_gmasks = [], []\n        for seq in seqs:\n            mask_token = gMASK if gMASK in seq else MASK\n            use_gmask = mask_token == gMASK\n            mask_positions.append(seq.index(mask_token))\n            use_gmasks.append(use_gmask)\n\n        # only last token for input_ids if past is not None\n        if past is not None or past_key_values is not None:\n            last_token = input_ids[:, -1].unsqueeze(-1)\n            if attention_mask is not None and attention_mask.dtype == torch.bool:\n                attention_mask = attention_mask[:, :, -1:]\n            else:\n                attention_mask = None\n            if position_ids is not None:\n                position_ids = position_ids[..., -1:]\n            else:\n                context_lengths = [seq.index(self.config.bos_token_id) for seq in seqs]\n                if self.position_encoding_2d:\n                    position_ids = torch.tensor(\n                        [[mask_position, seq_length - context_length] for mask_position, context_length in\n                         zip(mask_positions, context_lengths)], dtype=torch.long, device=input_ids.device).unsqueeze(-1)\n                else:\n                    position_ids = torch.tensor([mask_position for mask_position in mask_positions], dtype=torch.long,\n                                                device=input_ids.device).unsqueeze(-1)\n\n            if past is None:\n                past = past_key_values\n            return {\n                \"input_ids\": last_token,\n                \"past_key_values\": past,\n                \"position_ids\": position_ids,\n                \"attention_mask\": attention_mask\n            }\n        else:\n            if attention_mask is not None and attention_mask.dtype != torch.bool:\n                logger.warning_once(f\"The dtype of attention mask ({attention_mask.dtype}) is not bool\")\n                attention_mask = None\n            if attention_mask is None:\n                attention_mask = self.get_masks(\n                    input_ids,\n                    device=input_ids.device\n                )\n            if position_ids is None:\n                position_ids = self.get_position_ids(\n                    input_ids,\n                    device=input_ids.device,\n                    mask_positions=mask_positions,\n                    use_gmasks=use_gmasks\n                )\n\n            return {\n                \"input_ids\": input_ids,\n                
\"past_key_values\": past,\n                \"position_ids\": position_ids,\n                \"attention_mask\": attention_mask\n            }\n\n    def forward(\n            self,\n            input_ids: Optional[torch.Tensor] = None,\n            position_ids: Optional[torch.Tensor] = None,\n            attention_mask: Optional[torch.Tensor] = None,\n            past_key_values: Optional[Tuple[torch.FloatTensor]] = None,\n            inputs_embeds: Optional[torch.Tensor] = None,\n            labels: Optional[torch.Tensor] = None,\n            use_cache: Optional[bool] = None,\n            output_attentions: Optional[bool] = None,\n            output_hidden_states: Optional[bool] = None,\n            return_dict: Optional[bool] = None,\n    ):\n        use_cache = use_cache if use_cache is not None else self.config.use_cache\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n        transformer_outputs = self.transformer(\n            input_ids=input_ids,\n            position_ids=position_ids,\n            attention_mask=attention_mask,\n            past_key_values=past_key_values,\n            inputs_embeds=inputs_embeds,\n            use_cache=use_cache,\n            output_attentions=output_attentions,\n            output_hidden_states=output_hidden_states,\n            return_dict=return_dict,\n        )\n\n        hidden_states = transformer_outputs[0]\n\n        lm_logits = self.lm_head(hidden_states).permute(1, 0, 2).contiguous()\n\n        loss = None\n        if labels is not None:\n            lm_logits = lm_logits.to(torch.float32)\n\n            # Shift so that tokens < n predict n\n            shift_logits = lm_logits[..., :-1, :].contiguous()\n            shift_labels = labels[..., 1:].contiguous()\n            # Flatten the tokens\n            loss_fct = CrossEntropyLoss(ignore_index=-100)\n            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))\n\n            lm_logits = lm_logits.to(hidden_states.dtype)\n            loss = loss.to(hidden_states.dtype)\n\n        if not return_dict:\n            output = (lm_logits,) + transformer_outputs[1:]\n            return ((loss,) + output) if loss is not None else output\n\n        return CausalLMOutputWithPast(\n            loss=loss,\n            logits=lm_logits,\n            past_key_values=transformer_outputs.past_key_values,\n            hidden_states=transformer_outputs.hidden_states,\n            attentions=transformer_outputs.attentions,\n        )\n\n    @staticmethod\n    def _reorder_cache(\n            past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor\n    ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:\n        \"\"\"\n        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or\n        [`~PreTrainedModel.beam_sample`] is called. 
This is required to match `past_key_values` with the correct\n        beam_idx at every generation step.\n\n        Output shares the same memory storage as `past`.\n        \"\"\"\n        return tuple(\n            (\n                layer_past[0].index_select(1, beam_idx.to(layer_past[0].device)),\n                layer_past[1].index_select(1, beam_idx.to(layer_past[1].device)),\n            )\n            for layer_past in past\n        )\n\n    def process_response(self, response):\n        response = response.strip()\n        response = response.replace(\"[[训练时间]]\", \"2023年\")\n        punkts = [\n            [\",\", \"，\"],\n            [\"!\", \"！\"],\n            [\":\", \"：\"],\n            [\";\", \"；\"],\n            [\"\\?\", \"？\"],\n        ]\n        for item in punkts:\n            response = re.sub(r\"([\\u4e00-\\u9fff])%s\" % item[0], r\"\\1%s\" % item[1], response)\n            response = re.sub(r\"%s([\\u4e00-\\u9fff])\" % item[0], r\"%s\\1\" % item[1], response)\n        return response\n\n    @torch.no_grad()\n    def chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 2048, num_beams=1,\n             do_sample=True, top_p=0.7, temperature=0.95, logits_processor=None, **kwargs):\n        if history is None:\n            history = []\n        if logits_processor is None:\n            logits_processor = LogitsProcessorList()\n        logits_processor.append(InvalidScoreLogitsProcessor())\n        gen_kwargs = {\"max_length\": max_length, \"num_beams\": num_beams, \"do_sample\": do_sample, \"top_p\": top_p,\n                      \"temperature\": temperature, \"logits_processor\": logits_processor, **kwargs}\n        if not history:\n            prompt = query\n        else:\n            prompt = \"\"\n            for i, (old_query, response) in enumerate(history):\n                prompt += \"[Round {}]\\n问：{}\\n答：{}\\n\".format(i, old_query, response)\n            prompt += \"[Round {}]\\n问：{}\\n答：\".format(len(history), query)\n        inputs = tokenizer([prompt], return_tensors=\"pt\")\n        inputs = inputs.to(self.device)\n        outputs = self.generate(**inputs, **gen_kwargs)\n        outputs = outputs.tolist()[0][len(inputs[\"input_ids\"][0]):]\n        response = tokenizer.decode(outputs)\n        response = self.process_response(response)\n        history = history + [(query, response)]\n        return response, history\n\n    @torch.no_grad()\n    def stream_chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 2048,\n                    do_sample=True, top_p=0.7, temperature=0.95, logits_processor=None, **kwargs):\n        if history is None:\n            history = []\n        if logits_processor is None:\n            logits_processor = LogitsProcessorList()\n        logits_processor.append(InvalidScoreLogitsProcessor())\n        gen_kwargs = {\"max_length\": max_length, \"do_sample\": do_sample, \"top_p\": top_p,\n                      \"temperature\": temperature, \"logits_processor\": logits_processor, **kwargs}\n        if not history:\n            prompt = query\n        else:\n            prompt = \"\"\n            for i, (old_query, response) in enumerate(history):\n                prompt += \"[Round {}]\\n问：{}\\n答：{}\\n\".format(i, old_query, response)\n            prompt += \"[Round {}]\\n问：{}\\n答：\".format(len(history), query)\n        inputs = tokenizer([prompt], return_tensors=\"pt\")\n        inputs = inputs.to(self.device)\n        for outputs in 
self.stream_generate(**inputs, **gen_kwargs):\n            outputs = outputs.tolist()[0][len(inputs[\"input_ids\"][0]):]\n            response = tokenizer.decode(outputs)\n            response = self.process_response(response)\n            new_history = history + [(query, response)]\n            yield response, new_history\n\n    @torch.no_grad()\n    def stream_generate(\n            self,\n            input_ids,\n            generation_config: Optional[GenerationConfig] = None,\n            logits_processor: Optional[LogitsProcessorList] = None,\n            stopping_criteria: Optional[StoppingCriteriaList] = None,\n            prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,\n            **kwargs,\n    ):\n        batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]\n\n        if generation_config is None:\n            generation_config = self.generation_config\n        generation_config = copy.deepcopy(generation_config)\n        model_kwargs = generation_config.update(**kwargs)\n        bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id\n\n        if isinstance(eos_token_id, int):\n            eos_token_id = [eos_token_id]\n\n        has_default_max_length = kwargs.get(\"max_length\") is None and generation_config.max_length is not None\n        if has_default_max_length and generation_config.max_new_tokens is None:\n            warnings.warn(\n                f\"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. \"\n                \"This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we\"\n                \" recommend using `max_new_tokens` to control the maximum length of the generation.\",\n                UserWarning,\n            )\n        elif generation_config.max_new_tokens is not None:\n            generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length\n            if not has_default_max_length:\n                # Note: logging.Logger takes no category argument, unlike warnings.warn.\n                logger.warning(\n                    f\"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=\"\n                    f\"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. \"\n                    \"Please refer to the documentation for more information. \"\n                    \"(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)\"\n                )\n\n        if input_ids_seq_length >= generation_config.max_length:\n            input_ids_string = \"decoder_input_ids\" if self.config.is_encoder_decoder else \"input_ids\"\n            logger.warning(\n                f\"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to\"\n                f\" {generation_config.max_length}. This can lead to unexpected behavior. You should consider\"\n                \" increasing `max_new_tokens`.\"\n            )\n\n        # 2. 
Set generation parameters if not already defined\n        logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()\n        stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()\n\n        logits_processor = self._get_logits_processor(\n            generation_config=generation_config,\n            input_ids_seq_length=input_ids_seq_length,\n            encoder_input_ids=input_ids,\n            prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,\n            logits_processor=logits_processor,\n        )\n\n        stopping_criteria = self._get_stopping_criteria(\n            generation_config=generation_config, stopping_criteria=stopping_criteria\n        )\n        logits_warper = self._get_logits_warper(generation_config)\n\n        unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)\n        scores = None\n        while True:\n            model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)\n            # forward pass to get next token\n            outputs = self(\n                **model_inputs,\n                return_dict=True,\n                output_attentions=False,\n                output_hidden_states=False,\n            )\n\n            next_token_logits = outputs.logits[:, -1, :]\n\n            # pre-process distribution\n            next_token_scores = logits_processor(input_ids, next_token_logits)\n            next_token_scores = logits_warper(input_ids, next_token_scores)\n\n            # sample\n            probs = nn.functional.softmax(next_token_scores, dim=-1)\n            if generation_config.do_sample:\n                next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)\n            else:\n                next_tokens = torch.argmax(probs, dim=-1)\n\n            # update generated ids, model inputs, and length for next step\n            input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)\n            model_kwargs = self._update_model_kwargs_for_generation(\n                outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder\n            )\n            unfinished_sequences = unfinished_sequences.mul((sum(next_tokens != i for i in eos_token_id)).long())\n\n            # stop when each sentence is finished, or if we exceed the maximum length\n            if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):\n                break\n            yield input_ids\n\n    def quantize(self, bits: int, empty_init=False, **kwargs):\n        if bits == 0:\n            return\n\n        from .quantization import quantize\n\n        if self.quantized:\n            logger.info(\"Already quantized.\")\n            return self\n\n        self.quantized = True\n\n        self.config.quantization_bit = bits\n\n        self.transformer = quantize(self.transformer, bits, empty_init=empty_init, **kwargs)\n        return self\n"
  },
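  {
    "path": "examples/chat_demo.py",
    "content": "# examples/chat_demo.py -- illustrative sketch, not part of the original release.\n# Minimal usage of the chat()/stream_chat() helpers defined on\n# ChatGLMForConditionalGeneration in chatglm/modeling_chatglm.py. Loading through\n# AutoModel/AutoTokenizer with trust_remote_code=True follows the standard\n# Hugging Face convention for this code; the checkpoint name and this file's path\n# are assumptions, so adapt them to your setup. Requires a CUDA device.\nfrom transformers import AutoModel, AutoTokenizer\n\ntokenizer = AutoTokenizer.from_pretrained(\"THUDM/chatglm-6b\", trust_remote_code=True)\nmodel = AutoModel.from_pretrained(\"THUDM/chatglm-6b\", trust_remote_code=True).half().cuda()\nmodel = model.eval()\n\n# chat() builds the \"[Round i]\" prompt from `history`, runs generate(), and\n# returns (response, updated_history).\nresponse, history = model.chat(tokenizer, \"Hello\", history=[])\nprint(response)\n\n# stream_chat() yields (partial_response, new_history) pairs as tokens arrive,\n# backed by stream_generate() in the same file.\nfor response, history in model.stream_chat(tokenizer, \"Tell me about ChatGLM\", history=history):\n    pass\nprint(response)\n"
  },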
  {
    "path": "chatglm/quantization.py",
    "content": "from torch.nn import Linear\nfrom torch.nn.parameter import Parameter\n\nimport bz2\nimport torch\nimport base64\nimport ctypes\nfrom transformers.utils import logging\n\nfrom typing import List\nfrom functools import partial\n\nlogger = logging.get_logger(__name__)\n\ntry:\n    from cpm_kernels.kernels.base import LazyKernelCModule, KernelFunction, round_up\n\n    class Kernel:\n        def __init__(self, code: bytes, function_names: List[str]):\n            self.code = code\n            self._function_names = function_names\n            self._cmodule = LazyKernelCModule(self.code)\n\n            for name in self._function_names:\n                setattr(self, name, KernelFunction(self._cmodule, name))\n\n    quantization_code = \"$QlpoOTFBWSZTWU9yuJUAQHN//////////f/n/8/n///n//bt4dTidcVx8X3V9FV/92/v4B7/AD5FBQFAAAChSgKpFCFAFVSigUAAAEKhSgUUqgFBKigqVREQAABQBQIANDTTIGI00BkZBkNGE0A0BkBkGQGRkaNAaAGQNBoGgDIAAYIGTI0DQAQAaGmmQMRpoDIyDIaMJoBoDIDIMgMjI0aA0AMgaDQNAGQAAwQMmRoGgAgA0NNMgYjTQGRkGQ0YTQDQGQGQZAZGRo0BoAZA0GgaAMgABggZMjQNABABoaaZAxGmgMjIMhowmgGgMgMgyAyMjRoDQAyBoNA0AZAADBAyZGgaAAmqU1NEgJqnptU/Sn4jRR6J6epk2pqb1Q/SgAPUGgyNNGjQ2SBpoAZAAGg0NB6mgDIAAAAA2oaApSREBNAARhGiYEaEwU8pvImlP0k2aam1GaGqbFNM1MHpTwmkepmyU9R6nqPKekHqNNPUxNGhp6n6p6QaZ6o9TG1GMqcoV9ly6nRanHlq6zPNbnGZNi6HSug+2nPiZ13XcnFYZW+45W11CumhzYhchOJ2GLLV1OBjBjGf4TptOddTSOcVxhqYZMYwZXZZY00zI1paX5X9J+b+f4e+x43RXSxXPOdquiGpduatGyXneN696M9t4HU2eR5XX/kPhP261NTx3JO1Ow7LyuDmeo9a7d351T1ZxnvnrvYnrXv/hXxPCeuYx2XsNmO003eg9J3Z6U7b23meJ4ri01OdzTk9BNO96brz+qT5nuvvH3ds/G+m/JcG/F2XYuhXlvO+jP7U3XgrzPN/lr8Sf1n6j4j7jZs+s/T0tNaNNYzTs12rxjwztHlnire3Nzc3N1wuBwOBwXBvZfoHpD7rFmR99V5vj3aXza3xdBbXMalubTg/jIv5dfAi54Pdc75j4z412n3Npj3Ld/ENm7a3b/Cod6h/ret1/5vn/C+l+gdslMvgPSLJ8d8q+U66fevYn/tW1chleEtNTGlcHCbLRlq0tHzF5tsbbZZfHjjLgZu42XCuC3NrdjTasZGNzgxPIrGqp7r3p7L2p5XjnpPSmTd5XtzqnB6U87zzg1Ol0zd0zsLszxR6lkxp35u6/teL0L0W922cR7Lu1lpL9CsHirzuM2T+BgsyViT6LHcm0/Vr6U/7LGGyJeqTEjt0PHWhF5mCT7R9mtlDwriYv0Tyr/OxYt6qp5r0mPVT0608TqnqMZaarU2nFwrTzzlrs1ed7z1ux60wyr4ydCaTi3enW8x68x0zU7tXSlcmPSW1mGpWJMg4zmPC2lK96tp0OE80y4MfEvnZj8zGluR6b22ki1Ou9V2nCd9xovcPvcYMZYy0lvN60ScZ45vN6yeCeeXFb1lVjnnCar5fwXwE2bzJ4HI1XVPXfXZMm44GUsMpYsmLB65TuVdm0cl0b+i/wGNN66XjeV7zuPpHcnK/juhhjdfId5jMdE5nN0dGmmm2zZs2cexD5n9p/dY352XsvXHaZNWWsmmS1atjR452nYudzvqv2HMRyvNNnlMcDl3R2+yx2uVrBubTW9icHDVtbNXlZm7jma1rM4VurZZd2y6nUau7ZXZ7bVU+mnoOVxZGMrVmvX60605JwmzGZhhhjTWtaaaMaaGTGmNMZasY0iX8VMUl8eepaIrzGSpemWOQyZORk2bNpjUybMmxqYmknCGCFynutfksaZpjTNMaaatM0xsxcGR0sociNqxNSmhhR1ZJPbsn8qyF0t2qH6iYBclclalbtTTcHTDsPaX6rlnElph2Jyumumtynv2Kk8GI7rsvXbIcJgHJOSaSXnnGaI3m87RtVXJOZ/YtgdTE6Wpha6ZlE8ayXkef1fh602r2WwvfMXtMdLlkfnLFdYYwYso+bWqm7yJqHXZGw2nrS5ZanSYnWlxBxMF1V940K2wdrI7R6OYf7DGGamMmTSbRhlS45xmVOumF1EyPCmHrrN8wwZOOrdNtLeMtzFzDlWnfTBxMk2NaXIZHBYxYLD4w8yju0ao65Vz1OIXoS9dLanwCe1PWrYuWMqf1if1z2k2yYfKJ741PDgno1ZQ8DRqvUny3mNoWTzGO6m1DkrJI8JiR5cSd+vZdGOO8nrMoc5+NDUFsMSXaZJeNlMmGLtJsovOsUp7I9S5VojKxF6bTVEelXqlfJobQr3LozSh2Jk7VcrVMfhXqszGWMzNqGhqZY0OadxkyyMssKugZR0KNFXBHlqwmJgTE/BNVMk6ItJXZMR0H47GpXv/DMOvNkmVuaV1PRfEdxuqc7Hcd+ZV/zTLaRxWk0nl9CdCeM6mn5rstHIBcpiuwmUZXeq81DacHI2rmrZ5SuE5mOZd6LQrZg9mx32TprA8BMo5jKN6yLTCi3WzQaZSuhzTtM1fUTGVpG8Tw+KXI0tjEpiWxtLYynOlktSbVlaI5kxP8TDH8kx50xoxi5KcA4pcja8KWLRlO/Ks6q06ergnvm1ca3Tq8Uw7LTUsmWyctXPWmpitl/uvGcWTGXGuAXDfhqazGmjkxcJW5hMMMMpYsXl2TZYtVOddG3XCarUt6Ptq9CZXSNzyuRzqRZOjsxdBbFVz6OA5HI43r1jityVlVpVkxmOsyaYWE1NTGq1sOVh36mHMcxtSvcy70edG0ZGR3I1Go1GRlV7mWWo1G0ZGRqlvH40l7o4m5xMWLLLYyNjnqc8556mdPqLJ31n/1nWOncxzG1tizrHs/Z+d2vP/B/l8wdJ6rHUn2nbbDq4p6htFtYzMMMTaZis1K5GKzGNmxhmUx2DDlZ/qNnIx41xnaMfCZ
WYaZWtNLTNW8ND4Fw1MyZOCdM428suKG1ehW8TesOydg7J+YYcD4cYR+8dFK6M4E3HM9ZfRNNL+Sn6rsl4DsrDl2HpPCnfxjGXtbZtYys1ttlyJ4T+BvexjGWRjMszK4Jpc77D3GyuVD7q0+G8m9G+2+rGm7cOR2y7FdtY2XUYx/oNlfRYxhMYyYZkyyg55enna9Kt/FFi6GMMwYwdwxWgxGMLKYmUyGExTKMZkMFhkymKuh0NOBNnBu+23LdwDoZYYzGGMxtORaTU1pjTGWTTGGtMrNWUsyyTTLLG1qy2ZjbK2DBllWqxMtBMaYZQmcE7zvvRcTkclUwdkxTaSdyySt/7fpL+T1v516Ji97fwr5JbLu305zMn5+GMTTZ9F+y7ExwmGVfG44yxn3dLv6l5i+Wth1jCrDq21nW9LqvvDzz3Vf3LLH/O/32TJ/erx3bXftO4eF+G956D952K/An4NfvOpjFjExjevP/UmE0fIoZXx6/w6lX/no3D0bLt+ixjieBM6ksRd0yB4Lt2SwYNE+gd1detlZWUnpiZfGfFaK+4PyCa/v18V8X75pe9fLXzp7l3VjF76vWZmHwGz1IZNWT7b8yddJ4q5kyrVdfru6atWc7bVYztL9Jf4GXvT+Y8m9/YsXP6H018a8D4XVOqvfzqeR+6yZOD8dPv0+U7/q5Pl+2dNb0MjzGVH5p6MNQ7cOWvw62U9aHE8DprDek+McLyvDz+te+9Zhq5+YTruufMcWMabqysTmZVWjKPfnK0wyVcrsuhjZRdLkHNvD72b9abriOSGIxiLixMOoalNPXzy+wT/tf+U6HHONfsz+xe8ufHBdQWWGWLA9if0rsnmrxK5LvRZQeWsTCsrmOYy8VteVfuRfcVTtDLItLIsMYxZLdU/DbtSemxF6Z6Zo5WBXE4tFdCyVMMXMTEMZXVlS6Xec2T4e0tHsRcEuWshcJ2YsNF5rUx1E8ifCq6Z+ZP7qdCeu/aTwFd53l16/o0NOw6O3dLavP4Hbi4RdmuDk6DoYaninC0+o4uZjbJ7Rxeu0/FbuFg+q7DVS6fQe0rZ6NDGUNNU6DEqOaLTicKnYZMnBWruljQxoaS3dZhocDge0bSTyOvdAbG5hxe2xji7E/L55xX13wWNDi6HCekcFxfCPGxY0MXC+s7afWaMdDyjyr+o8Rudm/NabOZvdl274zH4f5XK9z6On1Pe/K5TdPAslg77BjuO6Y3eO7GqvOPG/stknp1leyvLL0Z7bl9I4noMvLkzytLhWYzrOZzLXCORe028rORzOg4N/L0HlMOQ3Pgmnbb6KczlabORpu980q37TBqRu0/p3PO6234Bl03Ynuz+9W7gnsEcmvYaYY3aMYY0wx3pYd+ujsXauWdaY5Xkbtl23fPzFHiDB/QMo0yFjBllYxTQYYyxkrwn7JufwJ/PfgJ+C83X69ni6zvXcnyXabv0ncbLwsceS+RNlyN2mnneJtX0ngYO0+e+0+UnA+Wch3ji8hj5an4h+i6XBySU4n+R0roVcbw5yvHrmr4Yw8Y7x6c+9POPYHI5HI5HI5HI5HGXGww4nE4nrVyOR8XeqPEO7PLOiukYa3Novk5hV4cdtYZLI93e+uxff2jRo0aNGjRo0aNG1bVtW1dy3m83m8+tQ5ZzHw3nObwOu8La9Rc1dtkdS8A3eTk823tnktXWlxN6Oixe06zrN70Isd9jiOgZFq9yfkPqP/SLhN2Myl8jDM43bl1nbcb4cO57jlh8Jow6pzXZdL4dyODTuuhu77FyO27DdwdRxmvO+O+3N2+BdqyTwLHVczDVY4UPE4O66/ZO2cx1LFzVdSXtF7G4HMbrauOHRw6c8FdZ5m9fHZHYZXfTlZquyynSyTTKke6vcffSD9pzPA/G7n7jxPmuhc1DHMynPMrGL6AdewYmwu5ko+UUyTwrMv27rPH1v1nGqd87+p6N6LU8k3NEng53xXyHS97+44OSg/sy/hn+Se6yfYNjW0/uTgP+PvWYzLMmjhcLB/gGpri6H83/84eUXWT6T9Hsv7785z/7z4icpW+zfXypuR7rx/gMdZb1/wC678pcs8/2a3mDitGHxl9mfPlll5MafWWqxk/eYuTDgcNMzDGWLWvsuglNxs53GtN6uWpktlW1tZZYcuinMMWmnNnJydze3b2Y1McBxrBkXw799izLMZZYyy0TkbsGM4p03S2uVu5s/XXUdSdec6smVxZYYGpVmT8A+8ajuEyV5FatkvVru2x6uxGXXbH4A+jvgP4GMYy3iPLXzq/6z65+E005ey+cwMZD3fZcqc6xpjTFjQ0P3U+e++cPYmTIwj0nrK5NPTfl3WvpfLtXDcb2HQMudYOxFXQBor4L4T6vrOauFctYXJQ++NUWmJe5bmx1jDiZS1dTqWxo4GR8jm3fttpmPHppk9PEyv4/y8/sO07XacOmcqc0x2Vi9BvNJvN5oW8x4mOsydpidRxMYJPx06m1bqPzq9KtK8sxXNXFodD/+MYYaJTLwOhc9brCsV18oOR1i4tXChyTkq4lf4y1Ke+9axjDHqs1mfBbMXuP4Hzi+X7t8vzv7bHerrUPgPCxhjre4fXdfLNtNM+Jd+Zdh8xd8wP87uNPoPgv4W7/5P2BuxfsMabNnMnza+54Pdi5U671GPZY8CehX8Voeoo7FHpkeEc6715FwHZrIrUrHaviPUbPZHND+IhczrP6FcYvhOZ0Di/ETt0OI+YwNWR9r7tpf6WDeZKZDB1+z2IthOl1mPyb5FluvEx9h9d0NnM0Y1XPFkWIsk1WotJ0PBMmkvjvQTd0e71tfeV+8r8lQ/tpzpsmxJ+InrI/dj2UajUajVTUajatRqNRtGo1Go1Go4wjeMpZFMVV9CHbofPraLsJ3JpWV2XOoanCuFky4y3PPNxucK2uKC1Lbdb1eo+m5XomN6HfeZsabHLHRX/K+offtNGGmHWctcVcG44MdSqsOLY9VzX+Zxfxn2HPdWTpzWvkrtJ8M5zorrKcquRytJ5N5DZmcaW02l76nWO+BqPXm1A2Ry/0q71dH/mqrqeFjkYxjEXtsX8qubTk67rGycyqsdm4tZx5D6D5hhi0waaWmiaMP81Yjii5qxPlPuU/GfTL1Y5E6Jyfiq63qTa39A4J0sOGDgO9WF9bOXl0XfPRbsY2bPNKPy1YrFYrFYmRhhlTIyMjJWJYZHXuCXI8OoXsvfljGLFicNifpp2XunoPiG1wtx3p1Tah+/DD66OnVtVXP9rKbVxOnL0tR/rHtqB5UDErUVcl11D4qqvjpOcxX7armUNJB3LpW6bxVvD08e8h3odKKvyCFZBdSh2FVcST9xV3n3T8t1j7Kr9qgrqXg+13Pt5U7JCvFXVIV1YG5lRhkVYZJYYDDD4KOIMoHCp26WS8GB7uBh2zIdgq/PKyInjV2STShuoapUdCpX1yTwqq/z1VvET7Kh5nVPkO8YyxjLt2MaaMmWTLQvx3qnzltnXW0p2jxgbEtSny/Osv8Y9pLMXYoHVPAhkVdWVeODhR6q9/Sxe2liwwZWMVvFXfRkeIDxAe
PUPIrdJ4ey6yquzH+PD/bUOWAu05qVHtFd8rrKHSoeNIOUqrYr3FXyToqfYJgwmJdKpXXOwYYegNNGMzfZPp/t3t/DVs4zjNTN61rRqaWaa4NYbRjTa0tWwy2Y2tGN8ZO8ofNKq4j9SL7I+cSm4/6ovLV5HNXLI0jJidwrtk6ynCaP6Z++GjRlWS3tLeW129Mi9evxU9mtz6s5J3Z7M2ngTgnKvmpomxpaLCzPfmx0JWE+m3NLDDGOX47RctdYYNK5jakdqLkRlI39n590T5zctGSwwZZDJj6kW8XSi6ot2MmWWJ0DUT3nuvebBudScjZ79g8cWJ8av0k+/bE5WKd5MdbFpbDVMxu1DVMmtNZGJvq1mtRbn6M+g/kP0FwDwr7quZs7xosNGpbscyxhhd9TyJyFwbLcxlTasg75vW7TsV5K7ji44XPMMrdoj+Y3rT0Hie62nlYV/pwczzOmdLqLhYkzGMzCZWGMQzGMSsZYY6Di1t4nlJ+Em63mJxrVLxPbYxNEdgc1dU2iOKyoYYWjNrEeHTYybVk0atSa7ehuwsWMWTqn1TrnS6hYsi71d1+s+k+ic70e20fzE/VaTdxT9ZtU4GIXdeNx3X77guYYfpHeTQjaMX6brOu4OY4K7Y2d9mbHarI5ox3p4GpJ2Vd/Tst60f7j999pppjR+Q/Qf8J/VaORs3cji7FfFuN61+ui9s8hix1OCh5KGVV23BPXvZfz3CLyHpix+exi8z/KnCnosY2eunor+cxyPO/xJ0vKey9OvE9VjqaYu0x3Z3jd6o2b1T12D+F8l232lwaaacD5LE8LBxu7WTlbWraWpew8Xexjel3E+wWD4APITdNqR8F3R3T0lunCQ4GaE9R37DxeCYfcHi4xci5ovKfxVs55y2hf+65E/Xdp6jR5nrebTmi5incpkyOjs50JvrZwstbbW6kfuuQw+2mykf/EXNFzxfKTrxew929TR6bWnGL//F3JFOFCQT3K4lQ\"\n\n    kernels = Kernel(\n        bz2.decompress(base64.b64decode(quantization_code)),\n        [\n            \"int4WeightCompression\",\n            \"int4WeightExtractionFloat\",\n            \"int4WeightExtractionHalf\",\n            \"int8WeightExtractionFloat\",\n            \"int8WeightExtractionHalf\",\n        ],\n    )\nexcept Exception as exception:\n    kernels = None\n    logger.warning(\"Failed to load cpm_kernels:\" + str(exception))\n\n\nclass W8A16Linear(torch.autograd.Function):\n    @staticmethod\n    def forward(ctx, inp: torch.Tensor, quant_w: torch.Tensor, scale_w: torch.Tensor, weight_bit_width):\n        ctx.inp_shape = inp.size()\n        ctx.weight_bit_width = weight_bit_width\n        out_features = quant_w.size(0)\n        inp = inp.contiguous().view(-1, inp.size(-1))\n        weight = extract_weight_to_half(quant_w, scale_w, weight_bit_width)\n        ctx.weight_shape = weight.size()\n        output = inp.mm(weight.t())\n        ctx.save_for_backward(inp, quant_w, scale_w)\n        return output.view(*(ctx.inp_shape[:-1] + (out_features,)))\n\n    @staticmethod\n    def backward(ctx, grad_output: torch.Tensor):\n        inp, quant_w, scale_w = ctx.saved_tensors\n        weight = extract_weight_to_half(quant_w, scale_w, ctx.weight_bit_width)\n        grad_output = grad_output.contiguous().view(-1, weight.size(0))\n        grad_input = grad_output.mm(weight)\n        grad_weight = grad_output.t().mm(inp)\n        return grad_input.view(ctx.inp_shape), grad_weight.view(ctx.weight_shape), None, None\n\n\ndef compress_int4_weight(weight: torch.Tensor):  # (n, m)\n    with torch.cuda.device(weight.device):\n        n, m = weight.size(0), weight.size(1)\n        assert m % 2 == 0\n        m = m // 2\n        out = torch.empty(n, m, dtype=torch.int8, device=\"cuda\")\n        stream = torch.cuda.current_stream()\n\n        gridDim = (n, 1, 1)\n        blockDim = (min(round_up(m, 32), 1024), 1, 1)\n\n        kernels.int4WeightCompression(\n            gridDim,\n            blockDim,\n            0,\n            stream,\n            [ctypes.c_void_p(weight.data_ptr()), ctypes.c_void_p(out.data_ptr()), ctypes.c_int32(n), ctypes.c_int32(m)],\n        )\n        return out\n\n\ndef extract_weight_to_half(weight: torch.Tensor, scale_list: torch.Tensor, source_bit_width: int):\n    if source_bit_width == 8:\n        func = kernels.int8WeightExtractionHalf\n    elif source_bit_width == 4:\n        func = kernels.int4WeightExtractionHalf\n    else:\n        assert False, \"Unsupported 
bit-width\"\n\n    with torch.cuda.device(weight.device):\n        n, m = weight.size(0), weight.size(1)\n        out = torch.empty(n, m * (8 // source_bit_width), dtype=torch.half, device=\"cuda\")\n        stream = torch.cuda.current_stream()\n\n        gridDim = (n, 1, 1)\n        blockDim = (min(round_up(m, 32), 1024), 1, 1)\n\n        func(\n            gridDim,\n            blockDim,\n            0,\n            stream,\n            [\n                ctypes.c_void_p(weight.data_ptr()),\n                ctypes.c_void_p(scale_list.data_ptr()),\n                ctypes.c_void_p(out.data_ptr()),\n                ctypes.c_int32(n),\n                ctypes.c_int32(m),\n            ],\n        )\n        return out\n\n\nclass QuantizedLinear(Linear):\n    def __init__(self, weight_bit_width: int, weight_tensor=None, bias_tensor=None, empty_init=False, *args, **kwargs):\n        super(QuantizedLinear, self).__init__(*args, **kwargs)\n        self.weight_bit_width = weight_bit_width\n\n        shape = self.weight.shape\n        del self.weight\n\n        if weight_tensor is None or empty_init:\n            self.weight = torch.empty(\n                shape[0], shape[1] * weight_bit_width // 8, dtype=torch.int8, device=kwargs[\"device\"]\n            )\n            self.weight_scale = torch.empty(shape[0], dtype=kwargs[\"dtype\"], device=kwargs[\"device\"])\n        else:\n            self.weight_scale = (weight_tensor.abs().max(dim=-1).values / ((2 ** (weight_bit_width - 1)) - 1)).half()\n            self.weight = torch.round(weight_tensor / self.weight_scale[:, None]).to(torch.int8)\n            if weight_bit_width == 4:\n                self.weight = compress_int4_weight(self.weight)\n\n        self.weight = Parameter(self.weight.to(kwargs[\"device\"]), requires_grad=False)\n        self.weight_scale = Parameter(self.weight_scale.to(kwargs[\"device\"]), requires_grad=False)\n        if bias_tensor is not None:\n            self.bias = Parameter(bias_tensor.to(kwargs[\"device\"]), requires_grad=False)\n        else:\n            self.bias = None\n\n    def forward(self, input):\n        output = W8A16Linear.apply(input, self.weight, self.weight_scale, self.weight_bit_width)\n        if self.bias is not None:\n            output = output + self.bias\n        return output\n\n\ndef quantize(model, weight_bit_width, empty_init=False, **kwargs):\n    \"\"\"Replace fp16 linear with quantized linear\"\"\"\n\n    for layer in model.layers:\n        layer.attention.query_key_value = QuantizedLinear(\n            weight_bit_width=weight_bit_width,\n            weight_tensor=layer.attention.query_key_value.weight.to(torch.cuda.current_device()),\n            bias_tensor=layer.attention.query_key_value.bias,\n            in_features=layer.attention.query_key_value.in_features,\n            out_features=layer.attention.query_key_value.out_features,\n            bias=True,\n            dtype=torch.half,\n            device=layer.attention.query_key_value.weight.device,\n            empty_init=empty_init\n        )\n        layer.attention.dense = QuantizedLinear(\n            weight_bit_width=weight_bit_width,\n            weight_tensor=layer.attention.dense.weight.to(torch.cuda.current_device()),\n            bias_tensor=layer.attention.dense.bias,\n            in_features=layer.attention.dense.in_features,\n            out_features=layer.attention.dense.out_features,\n            bias=True,\n            dtype=torch.half,\n            device=layer.attention.dense.weight.device,\n            
empty_init=empty_init\n        )\n        layer.mlp.dense_h_to_4h = QuantizedLinear(\n            weight_bit_width=weight_bit_width,\n            weight_tensor=layer.mlp.dense_h_to_4h.weight.to(torch.cuda.current_device()),\n            bias_tensor=layer.mlp.dense_h_to_4h.bias,\n            in_features=layer.mlp.dense_h_to_4h.in_features,\n            out_features=layer.mlp.dense_h_to_4h.out_features,\n            bias=True,\n            dtype=torch.half,\n            device=layer.mlp.dense_h_to_4h.weight.device,\n            empty_init=empty_init\n        )\n        layer.mlp.dense_4h_to_h = QuantizedLinear(\n            weight_bit_width=weight_bit_width,\n            weight_tensor=layer.mlp.dense_4h_to_h.weight.to(torch.cuda.current_device()),\n            bias_tensor=layer.mlp.dense_4h_to_h.bias,\n            in_features=layer.mlp.dense_4h_to_h.in_features,\n            out_features=layer.mlp.dense_4h_to_h.out_features,\n            bias=True,\n            dtype=torch.half,\n            device=layer.mlp.dense_4h_to_h.weight.device,\n            empty_init=empty_init\n        )\n    return model\n"
  },
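  {
    "path": "examples/quantize_demo.py",
    "content": "# examples/quantize_demo.py -- illustrative sketch, not part of the original release.\n# Shows the weight-only quantization path from chatglm/quantization.py: calling\n# ChatGLMForConditionalGeneration.quantize(bits) replaces every attention and MLP\n# Linear with a QuantizedLinear whose int8/int4 weights are unpacked back to fp16\n# by the cpm_kernels CUDA kernels on each forward pass. A CUDA device and the\n# cpm_kernels package are required; checkpoint name and file path are assumptions.\nfrom transformers import AutoModel, AutoTokenizer\n\ntokenizer = AutoTokenizer.from_pretrained(\"THUDM/chatglm-6b\", trust_remote_code=True)\nmodel = AutoModel.from_pretrained(\"THUDM/chatglm-6b\", trust_remote_code=True).half().cuda()\n\n# bits must be 8 or 4 (extract_weight_to_half() rejects any other width);\n# quantize() is idempotent and simply logs \"Already quantized.\" on a second call.\nmodel = model.quantize(8).eval()\n\nresponse, _ = model.chat(tokenizer, \"Hello\", history=[])\nprint(response)\n"
  },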
  {
    "path": "chatglm/tokenization_chatglm.py",
    "content": "\"\"\"Tokenization classes for ChatGLM.\"\"\"\nfrom typing import List, Optional, Union\nimport os\n\nfrom transformers.tokenization_utils import PreTrainedTokenizer\nfrom transformers.utils import logging, PaddingStrategy\nfrom transformers.tokenization_utils_base import EncodedInput, BatchEncoding\nfrom typing import Dict\nimport sentencepiece as spm\nimport numpy as np\n\nlogger = logging.get_logger(__name__)\n\nPRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {\n    \"THUDM/chatglm-6b\": 2048,\n}\n\n\nclass TextTokenizer:\n    def __init__(self, model_path):\n        self.sp = spm.SentencePieceProcessor()\n        self.sp.Load(model_path)\n        self.num_tokens = self.sp.vocab_size()\n\n    def encode(self, text):\n        return self.sp.EncodeAsIds(text)\n\n    def decode(self, ids: List[int]):\n        return self.sp.DecodeIds(ids)\n\n    def tokenize(self, text):\n        return self.sp.EncodeAsPieces(text)\n\n    def convert_tokens_to_ids(self, tokens):\n        return [self.sp.PieceToId(token) for token in tokens]\n\n    def convert_token_to_id(self, token):\n        return self.sp.PieceToId(token)\n\n    def convert_id_to_token(self, idx):\n        return self.sp.IdToPiece(idx)\n\n    def __len__(self):\n        return self.num_tokens\n\n\nclass SPTokenizer:\n    def __init__(\n            self,\n            vocab_file,\n            num_image_tokens=20000,\n            max_blank_length=80,\n            byte_fallback=True,\n    ):\n        assert vocab_file is not None\n        self.vocab_file = vocab_file\n        self.num_image_tokens = num_image_tokens\n        self.special_tokens = [\"[MASK]\", \"[gMASK]\", \"[sMASK]\", \"<unused_0>\", \"<sop>\", \"<eop>\", \"<ENC>\", \"<dBLOCK>\"]\n        self.max_blank_length = max_blank_length\n        self.byte_fallback = byte_fallback\n        self.text_tokenizer = TextTokenizer(vocab_file)\n\n    def _get_text_tokenizer(self):\n        return self.text_tokenizer\n\n    @staticmethod\n    def get_blank_token(length: int):\n        assert length >= 2\n        return f\"<|blank_{length}|>\"\n\n    @staticmethod\n    def get_tab_token():\n        return f\"<|tab|>\"\n\n    @property\n    def num_text_tokens(self):\n        return self.text_tokenizer.num_tokens\n\n    @property\n    def num_tokens(self):\n        return self.num_image_tokens + self.num_text_tokens\n\n    @staticmethod\n    def _encode_whitespaces(text: str, max_len: int = 80):\n        text = text.replace(\"\\t\", SPTokenizer.get_tab_token())\n        for i in range(max_len, 1, -1):\n            text = text.replace(\" \" * i, SPTokenizer.get_blank_token(i))\n        return text\n\n    def _preprocess(self, text: str, linebreak=True, whitespaces=True):\n        if linebreak:\n            text = text.replace(\"\\n\", \"<n>\")\n        if whitespaces:\n            text = self._encode_whitespaces(text, max_len=self.max_blank_length)\n        return text\n\n    def encode(\n            self, text: str, linebreak=True, whitespaces=True, add_dummy_prefix=True\n    ) -> List[int]:\n        \"\"\"\n        @param text: Text to encode.\n        @param linebreak: Whether to encode newline (\\n) in text.\n        @param whitespaces: Whether to encode multiple whitespaces or tab in text, useful for source code encoding.\n        @param special_tokens: Whether to encode special token ([MASK], [gMASK], etc.) 
in text.\n        @param add_dummy_prefix: Whether to add dummy blank space in the beginning.\n        \"\"\"\n        text = self._preprocess(text, linebreak, whitespaces)\n        if not add_dummy_prefix:\n            text = \"<n>\" + text\n        tmp = self._get_text_tokenizer().encode(text)\n        tokens = [x + self.num_image_tokens for x in tmp]\n        return tokens if add_dummy_prefix else tokens[2:]\n\n    def decode(self, text_ids: List[int]) -> str:\n        ids = [int(_id) - self.num_image_tokens for _id in text_ids]\n        ids = [_id for _id in ids if _id >= 0]\n        text = self._get_text_tokenizer().decode(ids)\n        text = text.replace(\"<n>\", \"\\n\")\n        text = text.replace(SPTokenizer.get_tab_token(), \"\\t\")\n        for i in range(2, self.max_blank_length + 1):\n            text = text.replace(self.get_blank_token(i), \" \" * i)\n        return text\n\n    def tokenize(\n            self, text: str, linebreak=True, whitespaces=True, add_dummy_prefix=True\n    ) -> List[str]:\n        \"\"\"\n        @param text: Text to encode.\n        @param linebreak: Whether to encode newline (\\n) in text.\n        @param whitespaces: Whether to encode multiple whitespaces or tab in text, useful for source code encoding.\n        @param special_tokens: Whether to encode special token ([MASK], [gMASK], etc.) in text.\n        @param add_dummy_prefix: Whether to add dummy blank space in the beginning.\n        \"\"\"\n        text = self._preprocess(text, linebreak, whitespaces)\n        if not add_dummy_prefix:\n            text = \"<n>\" + text\n        tokens = self._get_text_tokenizer().tokenize(text)\n        return tokens if add_dummy_prefix else tokens[2:]\n\n    def __getitem__(self, x: Union[int, str]):\n        if isinstance(x, int):\n            if x < self.num_image_tokens:\n                return \"<image_{}>\".format(x)\n            else:\n                return self.text_tokenizer.convert_id_to_token(x - self.num_image_tokens)\n        elif isinstance(x, str):\n            if x.startswith(\"<image_\") and x.endswith(\">\") and x[7:-1].isdigit():\n                return int(x[7:-1])\n            else:\n                return self.text_tokenizer.convert_token_to_id(x) + self.num_image_tokens\n        else:\n            raise ValueError(\"The key should be str or int.\")\n\n\nclass ChatGLMTokenizer(PreTrainedTokenizer):\n    \"\"\"\n    Construct a ChatGLM tokenizer. 
Based on SentencePiece.\n\n    Args:\n        vocab_file (`str`):\n            Path to the vocabulary file.\n    \"\"\"\n\n    vocab_files_names = {\"vocab_file\": \"ice_text.model\"}\n    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n    model_input_names = [\"input_ids\", \"attention_mask\", \"position_ids\"]\n\n    def __init__(\n            self,\n            vocab_file,\n            do_lower_case=False,\n            remove_space=False,\n            bos_token='<sop>',\n            eos_token='<eop>',\n            end_token='</s>',\n            mask_token='[MASK]',\n            gmask_token='[gMASK]',\n            padding_side=\"left\",\n            pad_token=\"<pad>\",\n            unk_token=\"<unk>\",\n            num_image_tokens=20000,\n            **kwargs\n    ) -> None:\n        super().__init__(\n            do_lower_case=do_lower_case,\n            remove_space=remove_space,\n            padding_side=padding_side,\n            bos_token=bos_token,\n            eos_token=eos_token,\n            end_token=end_token,\n            mask_token=mask_token,\n            gmask_token=gmask_token,\n            pad_token=pad_token,\n            unk_token=unk_token,\n            num_image_tokens=num_image_tokens,\n            **kwargs\n        )\n\n        self.do_lower_case = do_lower_case\n        self.remove_space = remove_space\n        self.vocab_file = vocab_file\n\n        self.bos_token = bos_token\n        self.eos_token = eos_token\n        self.end_token = end_token\n        self.mask_token = mask_token\n        self.gmask_token = gmask_token\n\n        self.sp_tokenizer = SPTokenizer(vocab_file, num_image_tokens=num_image_tokens)\n\n    @property\n    def gmask_token_id(self) -> Optional[int]:\n        if self.gmask_token is None:\n            return None\n        return self.convert_tokens_to_ids(self.gmask_token)\n\n    @property\n    def end_token_id(self) -> Optional[int]:\n        \"\"\"\n        `Optional[int]`: Id of the end of context token in the vocabulary. Returns `None` if the token has not been\n        set.\n        \"\"\"\n        if self.end_token is None:\n            return None\n        return self.convert_tokens_to_ids(self.end_token)\n\n    @property\n    def vocab_size(self):\n        \"\"\" Returns vocab size \"\"\"\n        return self.sp_tokenizer.num_tokens\n\n    def get_vocab(self):\n        \"\"\" Returns vocab as a dict \"\"\"\n        vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}\n        vocab.update(self.added_tokens_encoder)\n        return vocab\n\n    def preprocess_text(self, inputs):\n        if self.remove_space:\n            outputs = \" \".join(inputs.strip().split())\n        else:\n            outputs = inputs\n\n        if self.do_lower_case:\n            outputs = outputs.lower()\n\n        return outputs\n\n    def _tokenize(self, text, **kwargs):\n        \"\"\" Returns a tokenized string. 
\"\"\"\n        text = self.preprocess_text(text)\n\n        seq = self.sp_tokenizer.tokenize(text)\n\n        return seq\n\n    def _decode(\n            self,\n            token_ids: Union[int, List[int]],\n            skip_special_tokens: bool = False,\n            clean_up_tokenization_spaces: bool = True,\n            **kwargs\n    ) -> str:\n        if isinstance(token_ids, int):\n            token_ids = [token_ids]\n        if len(token_ids) == 0:\n            return \"\"\n        if self.pad_token_id in token_ids:  # remove pad\n            token_ids = list(filter((self.pad_token_id).__ne__, token_ids))\n        return self.sp_tokenizer.decode(token_ids)\n\n    def _convert_token_to_id(self, token):\n        \"\"\" Converts a token (str) in an id using the vocab. \"\"\"\n        return self.sp_tokenizer[token]\n\n    def _convert_id_to_token(self, index):\n        \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n        return self.sp_tokenizer[index]\n\n    def save_vocabulary(self, save_directory, filename_prefix=None):\n        \"\"\"\n        Save the vocabulary and special tokens file to a directory.\n\n        Args:\n            save_directory (`str`):\n                The directory in which to save the vocabulary.\n            filename_prefix (`str`, *optional*):\n                An optional prefix to add to the named of the saved files.\n\n        Returns:\n            `Tuple(str)`: Paths to the files saved.\n        \"\"\"\n        if os.path.isdir(save_directory):\n            vocab_file = os.path.join(\n                save_directory, self.vocab_files_names[\"vocab_file\"]\n            )\n        else:\n            vocab_file = save_directory\n\n        with open(self.vocab_file, 'rb') as fin:\n            proto_str = fin.read()\n\n        with open(vocab_file, \"wb\") as writer:\n            writer.write(proto_str)\n\n        return (vocab_file,)\n\n    def build_inputs_with_special_tokens(\n            self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None\n    ) -> List[int]:\n        \"\"\"\n        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\n        adding special tokens. 
A ChatGLM sequence has the following format:\n\n        - single sequence: `X [gMASK] <sop>`\n        - pair of sequences: `A [gMASK] <sop> B <eop>`\n\n        Args:\n            token_ids_0 (`List[int]`):\n                List of IDs to which the special tokens will be added.\n            token_ids_1 (`List[int]`, *optional*):\n                Optional second list of IDs for sequence pairs.\n\n        Returns:\n            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.\n        \"\"\"\n        gmask_id = self.sp_tokenizer[self.gmask_token]\n        eos_id = self.sp_tokenizer[self.eos_token]\n        token_ids_0 = token_ids_0 + [gmask_id, self.sp_tokenizer[self.bos_token]]\n        if token_ids_1 is not None:\n            token_ids_0 = token_ids_0 + token_ids_1 + [eos_id]\n        return token_ids_0\n\n    def _pad(\n            self,\n            encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],\n            max_length: Optional[int] = None,\n            padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,\n            pad_to_multiple_of: Optional[int] = None,\n            return_attention_mask: Optional[bool] = None,\n    ) -> dict:\n        \"\"\"\n        Pad encoded inputs (on left/right and up to predefined length or max length in the batch)\n\n        Args:\n            encoded_inputs:\n                Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).\n            max_length: maximum length of the returned list and optionally padding length (see below).\n                Will truncate by taking into account the special tokens.\n            padding_strategy: PaddingStrategy to use for padding.\n\n                - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch\n                - PaddingStrategy.MAX_LENGTH: Pad to the max length\n                - PaddingStrategy.DO_NOT_PAD: Do not pad (default)\n                The tokenizer padding sides are defined in self.padding_side:\n\n                    - 'left': pads on the left of the sequences\n                    - 'right': pads on the right of the sequences\n            pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.\n                This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability\n                `>= 7.5` (Volta).\n            return_attention_mask:\n                (optional) Set to False to avoid returning attention mask (default: set to model specifics)\n        \"\"\"\n        # Load from model defaults\n        bos_token_id = self.sp_tokenizer[self.bos_token]\n        mask_token_id = self.sp_tokenizer[self.mask_token]\n        gmask_token_id = self.sp_tokenizer[self.gmask_token]\n        assert self.padding_side == \"left\"\n\n        required_input = encoded_inputs[self.model_input_names[0]]\n        seq_length = len(required_input)\n\n        if padding_strategy == PaddingStrategy.LONGEST:\n            max_length = len(required_input)\n\n        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):\n            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of\n\n        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length\n\n        # Initialize attention mask if not present.\n        if max_length is not None:\n            if \"attention_mask\" not in encoded_inputs:\n  
              if bos_token_id in required_input:\n                    context_length = required_input.index(bos_token_id)\n                else:\n                    context_length = seq_length\n                attention_mask = np.ones((1, seq_length, seq_length))\n                attention_mask = np.tril(attention_mask)\n                attention_mask[:, :, :context_length] = 1\n                attention_mask = np.bool_(attention_mask < 0.5)\n                encoded_inputs[\"attention_mask\"] = attention_mask\n\n            if \"position_ids\" not in encoded_inputs:\n                if bos_token_id in required_input:\n                    context_length = required_input.index(bos_token_id)\n                else:\n                    context_length = seq_length\n                position_ids = np.arange(seq_length, dtype=np.int64)\n                mask_token = mask_token_id if mask_token_id in required_input else gmask_token_id\n                if mask_token in required_input:\n                    mask_position = required_input.index(mask_token)\n                    position_ids[context_length:] = mask_position\n                block_position_ids = np.concatenate(\n                    [np.zeros(context_length, dtype=np.int64),\n                     np.arange(1, seq_length - context_length + 1, dtype=np.int64)])\n                encoded_inputs[\"position_ids\"] = np.stack([position_ids, block_position_ids], axis=0)\n\n        if needs_to_be_padded:\n            difference = max_length - len(required_input)\n\n            if \"attention_mask\" in encoded_inputs:\n                encoded_inputs[\"attention_mask\"] = np.pad(encoded_inputs[\"attention_mask\"],\n                                                          pad_width=[(0, 0), (difference, 0), (difference, 0)],\n                                                          mode='constant', constant_values=True)\n            if \"token_type_ids\" in encoded_inputs:\n                encoded_inputs[\"token_type_ids\"] = [self.pad_token_type_id] * difference + encoded_inputs[\n                    \"token_type_ids\"\n                ]\n            if \"special_tokens_mask\" in encoded_inputs:\n                encoded_inputs[\"special_tokens_mask\"] = [1] * difference + encoded_inputs[\"special_tokens_mask\"]\n            if \"position_ids\" in encoded_inputs:\n                encoded_inputs[\"position_ids\"] = np.pad(encoded_inputs[\"position_ids\"],\n                                                        pad_width=[(0, 0), (difference, 0)])\n            encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input\n\n        return encoded_inputs\n"
  },
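  {
    "path": "examples/tokenizer_usage_sketch.py",
    "content": "# Illustrative usage sketch, not part of the original sources: it assumes the\n# tokenization files above are what a checkpoint id such as 'THUDM/chatglm-6b'\n# (the only id named in this repo) actually serves; adjust the path as needed.\nfrom transformers import AutoTokenizer\n\ntokenizer = AutoTokenizer.from_pretrained(\"THUDM/chatglm-6b\", trust_remote_code=True)\n\n# SPTokenizer._encode_whitespaces rewrites tabs to <|tab|> and runs of two or\n# more spaces to <|blank_n|> tokens, so indented source code tokenizes\n# compactly and SPTokenizer.decode restores the original whitespace.\nids = tokenizer.encode(\"def f():\\n    return 1\")\nprint(ids)\nprint(tokenizer.decode(ids))  # decodes back to (approximately) the input text\n"
  },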
  {
    "path": "chatglm2/configuration_chatglm.py",
    "content": "from transformers import PretrainedConfig\n\n\nclass ChatGLMConfig(PretrainedConfig):\n    model_type = \"chatglm\"\n    def __init__(\n        self,\n        num_layers=28,\n        padded_vocab_size=65024,\n        hidden_size=4096,\n        ffn_hidden_size=13696,\n        kv_channels=128,\n        num_attention_heads=32,\n        seq_length=2048,\n        hidden_dropout=0.0,\n        attention_dropout=0.0,\n        layernorm_epsilon=1e-5,\n        rmsnorm=True,\n        apply_residual_connection_post_layernorm=False,\n        post_layer_norm=True,\n        add_bias_linear=False,\n        add_qkv_bias=False,\n        interleaved_qkv=False,\n        bias_dropout_fusion=True,\n        multi_query_attention=False,\n        multi_query_group_num=1,\n        apply_query_key_layer_scaling=True,\n        attention_softmax_in_fp32=True,\n        fp32_residual_connection=False,\n        quantization_bit=0,\n        **kwargs\n    ):\n        self.num_layers = num_layers\n        self.padded_vocab_size = padded_vocab_size\n        self.hidden_size = hidden_size\n        self.ffn_hidden_size = ffn_hidden_size\n        self.kv_channels = kv_channels\n        self.num_attention_heads = num_attention_heads\n        self.seq_length = seq_length\n        self.hidden_dropout = hidden_dropout\n        self.attention_dropout = attention_dropout\n        self.layernorm_epsilon = layernorm_epsilon\n        self.rmsnorm = rmsnorm\n        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm\n        self.post_layer_norm = post_layer_norm\n        self.add_bias_linear = add_bias_linear\n        self.add_qkv_bias = add_qkv_bias\n        self.bias_dropout_fusion = bias_dropout_fusion\n        self.multi_query_attention = multi_query_attention\n        self.multi_query_group_num = multi_query_group_num\n        self.apply_query_key_layer_scaling = apply_query_key_layer_scaling\n        self.attention_softmax_in_fp32 = attention_softmax_in_fp32\n        self.fp32_residual_connection = fp32_residual_connection\n        self.quantization_bit = quantization_bit\n        super().__init__(**kwargs)"
  },
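  {
    "path": "examples/config_sketch.py",
    "content": "# Hypothetical sketch, not part of the original sources: it shows how the\n# multi-query attention fields of ChatGLMConfig determine the fused QKV\n# projection width computed in SelfAttention (see modeling_chatglm.py).\n# Assumes the chatglm2 package above is importable from the working directory.\nfrom chatglm2.configuration_chatglm import ChatGLMConfig\n\nconfig = ChatGLMConfig(multi_query_attention=True, multi_query_group_num=2)\n\nhead_dim = config.kv_channels  # 128 per attention head\nprojection_size = head_dim * config.num_attention_heads  # 4096: full query width\n# Keys and values are shared across head groups under multi-query attention,\n# so each contributes only one head_dim per group:\nqkv_hidden_size = projection_size + 2 * head_dim * config.multi_query_group_num\nprint(qkv_hidden_size)  # 4096 + 2 * 128 * 2 = 4608\n"
  },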
  {
    "path": "chatglm2/modeling_chatglm.py",
    "content": "\"\"\" PyTorch ChatGLM model. \"\"\"\n\nimport math\nimport copy\nimport warnings\nimport re\nimport sys\n\nimport torch\nimport torch.utils.checkpoint\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss, LayerNorm\nfrom torch.nn.utils import skip_init\nfrom typing import Optional, Tuple, Union, List, Callable, Dict, Any\n\nfrom transformers.modeling_outputs import (\n    BaseModelOutputWithPast,\n    CausalLMOutputWithPast,\n)\nfrom transformers.modeling_utils import PreTrainedModel\nfrom transformers.utils import logging\nfrom transformers.generation.logits_process import LogitsProcessor\nfrom transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig, ModelOutput\n\nfrom .configuration_chatglm import ChatGLMConfig\n\n# flags required to enable jit fusion kernels\n\nif sys.platform != 'darwin':\n    torch._C._jit_set_profiling_mode(False)\n    torch._C._jit_set_profiling_executor(False)\n    torch._C._jit_override_can_fuse_on_cpu(True)\n    torch._C._jit_override_can_fuse_on_gpu(True)\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"THUDM/ChatGLM2-6B\"\n_CONFIG_FOR_DOC = \"ChatGLM6BConfig\"\n\nCHATGLM_6B_PRETRAINED_MODEL_ARCHIVE_LIST = [\n    \"THUDM/chatglm2-6b\",\n    # See all ChatGLM models at https://huggingface.co/models?filter=chatglm\n]\n\n\ndef default_init(cls, *args, **kwargs):\n    return cls(*args, **kwargs)\n\n\nclass InvalidScoreLogitsProcessor(LogitsProcessor):\n    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:\n        if torch.isnan(scores).any() or torch.isinf(scores).any():\n            scores.zero_()\n            scores[..., 5] = 5e4\n        return scores\n\n\ndef split_tensor_along_last_dim(\n        tensor: torch.Tensor,\n        num_partitions: int,\n        contiguous_split_chunks: bool = False,\n) -> List[torch.Tensor]:\n    \"\"\"Split a tensor along its last dimension.\n\n    Arguments:\n        tensor: input tensor.\n        num_partitions: number of partitions to split the tensor\n        contiguous_split_chunks: If True, make each chunk contiguous\n                                 in memory.\n\n    Returns:\n        A list of Tensors\n    \"\"\"\n    # Get the size and dimension.\n    last_dim = tensor.dim() - 1\n    last_dim_size = tensor.size()[last_dim] // num_partitions\n    # Split.\n    tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)\n    # Note: torch.split does not create contiguous tensors by default.\n    if contiguous_split_chunks:\n        return tuple(chunk.contiguous() for chunk in tensor_list)\n\n    return tensor_list\n\n\nclass RotaryEmbedding(nn.Module):\n    def __init__(self, dim, original_impl=False, device=None, dtype=None):\n        super().__init__()\n        inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, device=device).to(dtype=dtype) / dim))\n        self.register_buffer(\"inv_freq\", inv_freq)\n        self.dim = dim\n        self.original_impl = original_impl\n\n    def forward_impl(\n            self, seq_len: int, n_elem: int, dtype: torch.dtype, device: torch.device, base: int = 10000\n    ):\n        \"\"\"Enhanced Transformer with Rotary Position Embedding.\n\n        Derived from: https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/\n        transformers/rope/__init__.py. 
MIT License:\n        https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/license.\n        \"\"\"\n        # $\\Theta = {\\theta_i = 10000^{\\frac{2(i-1)}{d}}, i \\in [1, 2, ..., \\frac{d}{2}]}$\n        theta = 1.0 / (base ** (torch.arange(0, n_elem, 2, dtype=dtype, device=device) / n_elem))\n\n        # Create position indexes `[0, 1, ..., seq_len - 1]`\n        seq_idx = torch.arange(seq_len, dtype=dtype, device=device)\n\n        # Calculate the product of position index and $\\theta_i$\n        idx_theta = torch.outer(seq_idx, theta).float()\n\n        cache = torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1)\n\n        # this is to mimic the behaviour of complex32, else we will get different results\n        if dtype in (torch.float16, torch.bfloat16, torch.int8):\n            cache = cache.bfloat16() if dtype == torch.bfloat16 else cache.half()\n        return cache\n\n    def forward(self, max_seq_len, offset=0):\n        return self.forward_impl(\n            max_seq_len, self.dim, dtype=self.inv_freq.dtype, device=self.inv_freq.device\n        )\n\n\n@torch.jit.script\ndef apply_rotary_pos_emb(x: torch.Tensor, rope_cache: torch.Tensor) -> torch.Tensor:\n    # x: [sq, b, np, hn]\n    sq, b, np, hn = x.size(0), x.size(1), x.size(2), x.size(3)\n    rot_dim = rope_cache.shape[-2] * 2\n    x, x_pass = x[..., :rot_dim], x[..., rot_dim:]\n    # truncate to support variable sizes\n    rope_cache = rope_cache[:sq]\n    xshaped = x.reshape(sq, -1, np, rot_dim // 2, 2)\n    rope_cache = rope_cache.view(sq, -1, 1, xshaped.size(3), 2)\n    x_out2 = torch.stack(\n        [\n            xshaped[..., 0] * rope_cache[..., 0] - xshaped[..., 1] * rope_cache[..., 1],\n            xshaped[..., 1] * rope_cache[..., 0] + xshaped[..., 0] * rope_cache[..., 1],\n        ],\n        -1,\n    )\n    x_out2 = x_out2.flatten(3)\n    return torch.cat((x_out2, x_pass), dim=-1)\n\n\nclass RMSNorm(torch.nn.Module):\n    def __init__(self, normalized_shape, eps=1e-5, device=None, dtype=None, **kwargs):\n        super().__init__()\n        self.weight = torch.nn.Parameter(torch.empty(normalized_shape, device=device, dtype=dtype))\n        self.eps = eps\n\n    def forward(self, hidden_states: torch.Tensor):\n        input_dtype = hidden_states.dtype\n        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)\n        hidden_states = hidden_states * torch.rsqrt(variance + self.eps)\n\n        return (self.weight * hidden_states).to(input_dtype)\n\n\nclass CoreAttention(torch.nn.Module):\n    def __init__(self, config: ChatGLMConfig, layer_number):\n        super(CoreAttention, self).__init__()\n\n        self.apply_query_key_layer_scaling = config.apply_query_key_layer_scaling\n        self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32\n        if self.apply_query_key_layer_scaling:\n            self.attention_softmax_in_fp32 = True\n        self.layer_number = max(1, layer_number)\n\n        projection_size = config.kv_channels * config.num_attention_heads\n\n        # Per attention head and per partition values.\n        self.hidden_size_per_partition = projection_size\n        self.hidden_size_per_attention_head = projection_size // config.num_attention_heads\n        self.num_attention_heads_per_partition = config.num_attention_heads\n\n        coeff = None\n        self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)\n        if self.apply_query_key_layer_scaling:\n            coeff = self.layer_number\n          
  self.norm_factor *= coeff\n        self.coeff = coeff\n\n        self.attention_dropout = torch.nn.Dropout(config.attention_dropout)\n\n    def forward(self, query_layer, key_layer, value_layer, attention_mask):\n        pytorch_major_version = int(torch.__version__.split('.')[0])\n        if pytorch_major_version >= 2:\n            query_layer, key_layer, value_layer = [k.permute(1, 2, 0, 3) for k in [query_layer, key_layer, value_layer]]\n            if attention_mask is None and query_layer.shape[2] == key_layer.shape[2]:\n                context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer,\n                                                                                 is_causal=True)\n            else:\n                if attention_mask is not None:\n                    attention_mask = ~attention_mask\n                context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer,\n                                                                                 attention_mask)\n            context_layer = context_layer.permute(2, 0, 1, 3)\n            new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)\n            context_layer = context_layer.reshape(*new_context_layer_shape)\n        else:\n            # Raw attention scores\n\n            # [b, np, sq, sk]\n            output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0))\n\n            # [sq, b, np, hn] -> [sq, b * np, hn]\n            query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1)\n            # [sk, b, np, hn] -> [sk, b * np, hn]\n            key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)\n\n            # preallocating input tensor: [b * np, sq, sk]\n            matmul_input_buffer = torch.empty(\n                output_size[0] * output_size[1], output_size[2], output_size[3], dtype=query_layer.dtype,\n                device=query_layer.device\n            )\n\n            # Raw attention scores. 
[b * np, sq, sk]\n            matmul_result = torch.baddbmm(\n                matmul_input_buffer,\n                query_layer.transpose(0, 1),  # [b * np, sq, hn]\n                key_layer.transpose(0, 1).transpose(1, 2),  # [b * np, hn, sk]\n                beta=0.0,\n                alpha=(1.0 / self.norm_factor),\n            )\n\n            # change view to [b, np, sq, sk]\n            attention_scores = matmul_result.view(*output_size)\n\n            # ===========================\n            # Attention probs and dropout\n            # ===========================\n\n            # attention scores and attention mask [b, np, sq, sk]\n            if self.attention_softmax_in_fp32:\n                attention_scores = attention_scores.float()\n            if self.coeff is not None:\n                attention_scores = attention_scores * self.coeff\n            if attention_mask is None and attention_scores.shape[2] == attention_scores.shape[3]:\n                attention_mask = torch.ones(output_size[0], 1, output_size[2], output_size[3],\n                                            device=attention_scores.device, dtype=torch.bool)\n                attention_mask.tril_()\n                attention_mask = ~attention_mask\n            if attention_mask is not None:\n                attention_scores = attention_scores.masked_fill(attention_mask, float(\"-inf\"))\n            attention_probs = F.softmax(attention_scores, dim=-1)\n            attention_probs = attention_probs.type_as(value_layer)\n\n            # This is actually dropping out entire tokens to attend to, which might\n            # seem a bit unusual, but is taken from the original Transformer paper.\n            attention_probs = self.attention_dropout(attention_probs)\n            # =========================\n            # Context layer. 
[sq, b, hp]\n            # =========================\n\n            # value_layer -> context layer.\n            # [sk, b, np, hn] --> [b, np, sq, hn]\n\n            # context layer shape: [b, np, sq, hn]\n            output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3))\n            # change view [sk, b * np, hn]\n            value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1)\n            # change view [b * np, sq, sk]\n            attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)\n            # matmul: [b * np, sq, hn]\n            context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))\n            # change view [b, np, sq, hn]\n            context_layer = context_layer.view(*output_size)\n            # [b, np, sq, hn] --> [sq, b, np, hn]\n            context_layer = context_layer.permute(2, 0, 1, 3).contiguous()\n            # [sq, b, np, hn] --> [sq, b, hp]\n            new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)\n            context_layer = context_layer.view(*new_context_layer_shape)\n\n        return context_layer\n\n\nclass SelfAttention(torch.nn.Module):\n    \"\"\"Parallel self-attention layer abstract class.\n\n    Self-attention layer takes input with size [s, b, h]\n    and returns output of the same size.\n    \"\"\"\n\n    def __init__(self, config: ChatGLMConfig, layer_number, device=None):\n        super(SelfAttention, self).__init__()\n        self.layer_number = max(1, layer_number)\n\n        self.projection_size = config.kv_channels * config.num_attention_heads\n\n        # Per attention head and per partition values.\n        self.hidden_size_per_attention_head = self.projection_size // config.num_attention_heads\n        self.num_attention_heads_per_partition = config.num_attention_heads\n\n        self.multi_query_attention = config.multi_query_attention\n        self.qkv_hidden_size = 3 * self.projection_size\n        if self.multi_query_attention:\n            self.num_multi_query_groups_per_partition = config.multi_query_group_num\n            self.qkv_hidden_size = (\n                    self.projection_size + 2 * self.hidden_size_per_attention_head * config.multi_query_group_num\n            )\n        self.query_key_value = nn.Linear(config.hidden_size, self.qkv_hidden_size,\n                                         bias=config.add_bias_linear or config.add_qkv_bias,\n                                         device=device, **_config_to_kwargs(config)\n                                         )\n\n        self.core_attention = CoreAttention(config, self.layer_number)\n\n        # Output.\n        self.dense = nn.Linear(self.projection_size, config.hidden_size, bias=config.add_bias_linear,\n                               device=device, **_config_to_kwargs(config)\n                               )\n\n    def _allocate_memory(self, inference_max_sequence_len, batch_size, device=None, dtype=None):\n        if self.multi_query_attention:\n            num_attention_heads = self.num_multi_query_groups_per_partition\n        else:\n            num_attention_heads = self.num_attention_heads_per_partition\n        return torch.empty(\n            inference_max_sequence_len,\n            batch_size,\n            num_attention_heads,\n            self.hidden_size_per_attention_head,\n            dtype=dtype,\n            device=device,\n        )\n\n    def forward(\n            self, 
hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True\n    ):\n        # hidden_states: [sq, b, h]\n\n        # =================================================\n        # Pre-allocate memory for key-values for inference.\n        # =================================================\n        # =====================\n        # Query, Key, and Value\n        # =====================\n\n        # Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)]\n        mixed_x_layer = self.query_key_value(hidden_states)\n\n        if self.multi_query_attention:\n            (query_layer, key_layer, value_layer) = mixed_x_layer.split(\n                [\n                    self.num_attention_heads_per_partition * self.hidden_size_per_attention_head,\n                    self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head,\n                    self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head,\n                ],\n                dim=-1,\n            )\n            query_layer = query_layer.view(\n                query_layer.size()[:-1] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head)\n            )\n            key_layer = key_layer.view(\n                key_layer.size()[:-1] + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)\n            )\n            value_layer = value_layer.view(\n                value_layer.size()[:-1]\n                + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)\n            )\n        else:\n            new_tensor_shape = mixed_x_layer.size()[:-1] + \\\n                               (self.num_attention_heads_per_partition,\n                                3 * self.hidden_size_per_attention_head)\n            mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)\n\n            # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn]\n            (query_layer, key_layer, value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3)\n\n        # apply relative positional encoding (rotary embedding)\n        if rotary_pos_emb is not None:\n            query_layer = apply_rotary_pos_emb(query_layer, rotary_pos_emb)\n            key_layer = apply_rotary_pos_emb(key_layer, rotary_pos_emb)\n\n        # adjust key and value for inference\n        if use_cache:\n            if kv_cache is not None:\n                cache_k, cache_v = kv_cache\n                key_layer = torch.cat((cache_k, key_layer), dim=0)\n                value_layer = torch.cat((cache_v, value_layer), dim=0)\n            kv_cache = (key_layer, value_layer)\n        else:\n            kv_cache = None\n\n        if self.multi_query_attention:\n            key_layer = key_layer.unsqueeze(-2)\n            key_layer = key_layer.expand(\n                -1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1\n            )\n            key_layer = key_layer.contiguous().view(\n                key_layer.size()[:2] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head)\n            )\n            value_layer = value_layer.unsqueeze(-2)\n            value_layer = value_layer.expand(\n                -1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1\n            )\n            value_layer = value_layer.contiguous().view(\n                value_layer.size()[:2] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head)\n     
       )\n\n        # ==================================\n        # core attention computation\n        # ==================================\n\n        context_layer = self.core_attention(query_layer, key_layer, value_layer, attention_mask)\n\n        # =================\n        # Output. [sq, b, h]\n        # =================\n\n        output = self.dense(context_layer)\n\n        return output, kv_cache\n\n\ndef _config_to_kwargs(args):\n    common_kwargs = {\n        \"dtype\": args.torch_dtype,\n    }\n    return common_kwargs\n\n\nclass MLP(torch.nn.Module):\n    \"\"\"MLP.\n\n    MLP will take the input with h hidden state, project it to 4*h\n    hidden dimension, perform nonlinear transformation, and project the\n    state back into h hidden dimension.\n    \"\"\"\n\n    def __init__(self, config: ChatGLMConfig, device=None):\n        super(MLP, self).__init__()\n\n        self.add_bias = config.add_bias_linear\n\n        # Project to 4h. If using swiglu double the output width, see https://arxiv.org/pdf/2002.05202.pdf\n        self.dense_h_to_4h = nn.Linear(\n            config.hidden_size,\n            config.ffn_hidden_size * 2,\n            bias=self.add_bias,\n            device=device,\n            **_config_to_kwargs(config)\n        )\n\n        def swiglu(x):\n            x = torch.chunk(x, 2, dim=-1)\n            return F.silu(x[0]) * x[1]\n\n        self.activation_func = swiglu\n\n        # Project back to h.\n        self.dense_4h_to_h = nn.Linear(\n            config.ffn_hidden_size,\n            config.hidden_size,\n            bias=self.add_bias,\n            device=device,\n            **_config_to_kwargs(config)\n        )\n\n    def forward(self, hidden_states):\n        # [s, b, 4hp]\n        intermediate_parallel = self.dense_h_to_4h(hidden_states)\n        intermediate_parallel = self.activation_func(intermediate_parallel)\n        # [s, b, h]\n        output = self.dense_4h_to_h(intermediate_parallel)\n        return output\n\n\nclass GLMBlock(torch.nn.Module):\n    \"\"\"A single transformer layer.\n\n    Transformer layer takes input with size [s, b, h] and returns an\n    output of the same size.\n    \"\"\"\n\n    def __init__(self, config: ChatGLMConfig, layer_number, device=None):\n        super(GLMBlock, self).__init__()\n        self.layer_number = layer_number\n\n        self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm\n\n        self.fp32_residual_connection = config.fp32_residual_connection\n\n        LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm\n        # Layernorm on the input data.\n        self.input_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,\n                                             dtype=config.torch_dtype)\n\n        # Self attention.\n        self.self_attention = SelfAttention(config, layer_number, device=device)\n        self.hidden_dropout = config.hidden_dropout\n\n        # Layernorm on the attention output\n        self.post_attention_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,\n                                                      dtype=config.torch_dtype)\n\n        # MLP\n        self.mlp = MLP(config, device=device)\n\n    def forward(\n            self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True,\n    ):\n        # hidden_states: [s, b, h]\n\n        # Layer norm at the beginning of the transformer layer.\n        layernorm_output = 
self.input_layernorm(hidden_states)\n        # Self attention.\n        attention_output, kv_cache = self.self_attention(\n            layernorm_output,\n            attention_mask,\n            rotary_pos_emb,\n            kv_cache=kv_cache,\n            use_cache=use_cache\n        )\n\n        # Residual connection.\n        if self.apply_residual_connection_post_layernorm:\n            residual = layernorm_output\n        else:\n            residual = hidden_states\n\n        layernorm_input = torch.nn.functional.dropout(attention_output, p=self.hidden_dropout, training=self.training)\n        layernorm_input = residual + layernorm_input\n\n        # Layer norm post the self attention.\n        layernorm_output = self.post_attention_layernorm(layernorm_input)\n\n        # MLP.\n        mlp_output = self.mlp(layernorm_output)\n\n        # Second residual connection.\n        if self.apply_residual_connection_post_layernorm:\n            residual = layernorm_output\n        else:\n            residual = layernorm_input\n\n        output = torch.nn.functional.dropout(mlp_output, p=self.hidden_dropout, training=self.training)\n        output = residual + output\n\n        return output, kv_cache\n\n\nclass GLMTransformer(torch.nn.Module):\n    \"\"\"Transformer class.\"\"\"\n\n    def __init__(self, config: ChatGLMConfig, device=None):\n        super(GLMTransformer, self).__init__()\n\n        self.fp32_residual_connection = config.fp32_residual_connection\n        self.post_layer_norm = config.post_layer_norm\n\n        # Number of layers.\n        self.num_layers = config.num_layers\n\n        # Transformer layers.\n        def build_layer(layer_number):\n            return GLMBlock(config, layer_number, device=device)\n\n        self.layers = torch.nn.ModuleList([build_layer(i + 1) for i in range(self.num_layers)])\n\n        if self.post_layer_norm:\n            LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm\n            # Final layer norm before output.\n            self.final_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,\n                                                 dtype=config.torch_dtype)\n\n    def _get_layer(self, layer_number):\n        return self.layers[layer_number]\n\n    def forward(\n            self, hidden_states, attention_mask, rotary_pos_emb, kv_caches=None,\n            use_cache: Optional[bool] = True,\n            output_hidden_states: Optional[bool] = False,\n    ):\n        if not kv_caches:\n            kv_caches = [None for _ in range(self.num_layers)]\n        presents = () if use_cache else None\n        all_self_attentions = None\n        all_hidden_states = () if output_hidden_states else None\n        for index in range(self.num_layers):\n            if output_hidden_states:\n                all_hidden_states = all_hidden_states + (hidden_states,)\n\n            layer = self._get_layer(index)\n\n            hidden_states, kv_cache = layer(\n                hidden_states,\n                attention_mask,\n                rotary_pos_emb,\n                kv_cache=kv_caches[index],\n                use_cache=use_cache\n            )\n            if use_cache:\n                presents = presents + (kv_cache,)\n\n        if output_hidden_states:\n            all_hidden_states = all_hidden_states + (hidden_states,)\n\n        # Final layer norm.\n        if self.post_layer_norm:\n            hidden_states = self.final_layernorm(hidden_states)\n\n        return hidden_states, presents, 
all_hidden_states, all_self_attentions\n\n\nclass ChatGLMPreTrainedModel(PreTrainedModel):\n    \"\"\"\n    An abstract class to handle weights initialization and\n    a simple interface for downloading and loading pretrained models.\n    \"\"\"\n\n    is_parallelizable = False\n    supports_gradient_checkpointing = True\n    config_class = ChatGLMConfig\n    base_model_prefix = \"transformer\"\n    _no_split_modules = [\"GLMBlock\"]\n\n    def _init_weights(self, module: nn.Module):\n        \"\"\"Initialize the weights.\"\"\"\n        return\n\n    def get_masks(self, input_ids, past_key_values, padding_mask=None):\n        batch_size, seq_length = input_ids.shape\n        full_attention_mask = torch.ones(batch_size, seq_length, seq_length, device=input_ids.device)\n        full_attention_mask.tril_()\n        past_length = 0\n        if past_key_values:\n            past_length = past_key_values[0][0].shape[0]\n        if past_length:\n            full_attention_mask = torch.cat((torch.ones(batch_size, seq_length, past_length,\n                                                        device=input_ids.device), full_attention_mask), dim=-1)\n        if padding_mask is not None:\n            full_attention_mask = full_attention_mask * padding_mask.unsqueeze(1)\n        if not past_length and padding_mask is not None:\n            full_attention_mask -= padding_mask.unsqueeze(-1) - 1\n        full_attention_mask = (full_attention_mask < 0.5).bool()\n        full_attention_mask.unsqueeze_(1)\n        return full_attention_mask\n\n    def get_position_ids(self, input_ids, device):\n        batch_size, seq_length = input_ids.shape\n        position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1)\n        return position_ids\n\n    def _set_gradient_checkpointing(self, module, value=False):\n        if isinstance(module, ChatGLMModel):\n            module.gradient_checkpointing = value\n\n\nclass Embedding(torch.nn.Module):\n    \"\"\"Language model embeddings.\"\"\"\n\n    def __init__(self, config: ChatGLMConfig, device=None):\n        super(Embedding, self).__init__()\n\n        self.hidden_size = config.hidden_size\n        # Word embeddings (parallel).\n        self.word_embeddings = nn.Embedding(\n            config.padded_vocab_size,\n            self.hidden_size,\n            dtype=config.torch_dtype,\n            device=device\n        )\n        self.fp32_residual_connection = config.fp32_residual_connection\n\n    def forward(self, input_ids):\n        # Embeddings.\n        words_embeddings = self.word_embeddings(input_ids)\n        embeddings = words_embeddings\n        # Data format change to avoid explicit transposes: [b s h] --> [s b h].\n        embeddings = embeddings.transpose(0, 1).contiguous()\n        # If the input flag for fp32 residual connection is set, convert to float.\n        if self.fp32_residual_connection:\n            embeddings = embeddings.float()\n        return embeddings\n\n\nclass ChatGLMModel(ChatGLMPreTrainedModel):\n    def __init__(self, config: ChatGLMConfig, device=None, empty_init=True):\n        super().__init__(config)\n        if empty_init:\n            init_method = skip_init\n        else:\n            init_method = default_init\n        init_kwargs = {}\n        if device is not None:\n            init_kwargs[\"device\"] = device\n        self.embedding = init_method(Embedding, config, **init_kwargs)\n\n        # Rotary positional embeddings\n        self.seq_length = config.seq_length\n     
   rotary_dim = (\n            config.hidden_size // config.num_attention_heads if config.kv_channels is None else config.kv_channels\n        )\n\n        self.rotary_pos_emb = RotaryEmbedding(rotary_dim // 2, original_impl=config.original_rope, device=device,\n                                              dtype=config.torch_dtype)\n        self.encoder = init_method(GLMTransformer, config, **init_kwargs)\n        self.output_layer = init_method(nn.Linear, config.hidden_size, config.padded_vocab_size, bias=False,\n                                        dtype=config.torch_dtype, **init_kwargs)\n        self.gradient_checkpointing = False\n\n    def get_input_embeddings(self):\n        return self.embedding.word_embeddings\n\n    def forward(\n            self,\n            input_ids,\n            position_ids: Optional[torch.Tensor] = None,\n            attention_mask: Optional[torch.BoolTensor] = None,\n            full_attention_mask: Optional[torch.BoolTensor] = None,\n            past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,\n            inputs_embeds: Optional[torch.Tensor] = None,\n            use_cache: Optional[bool] = None,\n            output_hidden_states: Optional[bool] = None,\n            return_dict: Optional[bool] = None,\n    ):\n        output_hidden_states = (\n            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        )\n        use_cache = use_cache if use_cache is not None else self.config.use_cache\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n        batch_size, seq_length = input_ids.shape\n\n        if inputs_embeds is None:\n            inputs_embeds = self.embedding(input_ids)\n\n        if full_attention_mask is None:\n            if (attention_mask is not None and not attention_mask.all()) or (past_key_values and seq_length != 1):\n                full_attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask)\n\n        # Rotary positional embeddings\n        rotary_pos_emb = self.rotary_pos_emb(self.seq_length)\n        if position_ids is not None:\n            rotary_pos_emb = rotary_pos_emb[position_ids]\n        else:\n            rotary_pos_emb = rotary_pos_emb[None, :seq_length]\n        rotary_pos_emb = rotary_pos_emb.transpose(0, 1).contiguous()\n\n        # Run encoder.\n        hidden_states, presents, all_hidden_states, all_self_attentions = self.encoder(\n            inputs_embeds, full_attention_mask, rotary_pos_emb=rotary_pos_emb,\n            kv_caches=past_key_values, use_cache=use_cache, output_hidden_states=output_hidden_states\n        )\n\n        if not return_dict:\n            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)\n\n        return BaseModelOutputWithPast(\n            last_hidden_state=hidden_states,\n            past_key_values=presents,\n            hidden_states=all_hidden_states,\n            attentions=all_self_attentions,\n        )\n\n    def quantize(self, weight_bit_width: int):\n        from .quantization import quantize\n        quantize(self.encoder, weight_bit_width)\n        return self\n\n\nclass ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):\n    def __init__(self, config: ChatGLMConfig, empty_init=True, device=None):\n        super().__init__(config)\n\n        self.max_sequence_length = config.max_length\n        self.transformer = ChatGLMModel(config, 
empty_init=empty_init, device=device)\n        self.config = config\n        self.quantized = False\n\n        if self.config.quantization_bit:\n            self.quantize(self.config.quantization_bit, empty_init=True)\n\n    def _update_model_kwargs_for_generation(\n            self,\n            outputs: ModelOutput,\n            model_kwargs: Dict[str, Any],\n            is_encoder_decoder: bool = False,\n            standardize_cache_format: bool = False,\n    ) -> Dict[str, Any]:\n        # update past_key_values\n        model_kwargs[\"past_key_values\"] = self._extract_past_from_model_output(\n            outputs, standardize_cache_format=standardize_cache_format\n        )\n\n        # update attention mask\n        if \"attention_mask\" in model_kwargs:\n            attention_mask = model_kwargs[\"attention_mask\"]\n            model_kwargs[\"attention_mask\"] = torch.cat(\n                [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1\n            )\n\n        # update position ids\n        if \"position_ids\" in model_kwargs:\n            position_ids = model_kwargs[\"position_ids\"]\n            new_position_id = position_ids[..., -1:].clone()\n            new_position_id += 1\n            model_kwargs[\"position_ids\"] = torch.cat(\n                [position_ids, new_position_id], dim=-1\n            )\n\n        model_kwargs[\"is_first_forward\"] = False\n        return model_kwargs\n\n    def prepare_inputs_for_generation(\n            self,\n            input_ids: torch.LongTensor,\n            past_key_values: Optional[torch.Tensor] = None,\n            attention_mask: Optional[torch.Tensor] = None,\n            position_ids: Optional[torch.Tensor] = None,\n            is_first_forward: bool = True,\n            **kwargs\n    ) -> dict:\n        # only last token for input_ids if past is not None\n        if position_ids is None:\n            position_ids = self.get_position_ids(input_ids, device=input_ids.device)\n        if not is_first_forward:\n            position_ids = position_ids[..., -1:]\n            input_ids = input_ids[:, -1:]\n        return {\n            \"input_ids\": input_ids,\n            \"past_key_values\": past_key_values,\n            \"position_ids\": position_ids,\n            \"attention_mask\": attention_mask,\n            \"return_last_logit\": True\n        }\n\n    def forward(\n            self,\n            input_ids: Optional[torch.Tensor] = None,\n            position_ids: Optional[torch.Tensor] = None,\n            attention_mask: Optional[torch.Tensor] = None,\n            past_key_values: Optional[Tuple[torch.FloatTensor]] = None,\n            inputs_embeds: Optional[torch.Tensor] = None,\n            labels: Optional[torch.Tensor] = None,\n            use_cache: Optional[bool] = None,\n            output_attentions: Optional[bool] = None,\n            output_hidden_states: Optional[bool] = None,\n            return_dict: Optional[bool] = None,\n            return_last_logit: Optional[bool] = False,\n    ):\n        use_cache = use_cache if use_cache is not None else self.config.use_cache\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n        transformer_outputs = self.transformer(\n            input_ids=input_ids,\n            position_ids=position_ids,\n            attention_mask=attention_mask,\n            past_key_values=past_key_values,\n            inputs_embeds=inputs_embeds,\n            use_cache=use_cache,\n            
output_hidden_states=output_hidden_states,\n            return_dict=return_dict,\n        )\n\n        hidden_states = transformer_outputs[0]\n        if return_last_logit:\n            hidden_states = hidden_states[-1:]\n        lm_logits = self.transformer.output_layer(hidden_states)\n        lm_logits = lm_logits.transpose(0, 1).contiguous()\n\n        loss = None\n        if labels is not None:\n            lm_logits = lm_logits.to(torch.float32)\n\n            # Shift so that tokens < n predict n\n            shift_logits = lm_logits[..., :-1, :].contiguous()\n            shift_labels = labels[..., 1:].contiguous()\n            # Flatten the tokens\n            loss_fct = CrossEntropyLoss(ignore_index=-100)\n            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))\n\n            lm_logits = lm_logits.to(hidden_states.dtype)\n            loss = loss.to(hidden_states.dtype)\n\n        if not return_dict:\n            output = (lm_logits,) + transformer_outputs[1:]\n            return ((loss,) + output) if loss is not None else output\n\n        return CausalLMOutputWithPast(\n            loss=loss,\n            logits=lm_logits,\n            past_key_values=transformer_outputs.past_key_values,\n            hidden_states=transformer_outputs.hidden_states,\n            attentions=transformer_outputs.attentions,\n        )\n\n    @staticmethod\n    def _reorder_cache(\n            past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor\n    ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:\n        \"\"\"\n        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or\n        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct\n        beam_idx at every generation step.\n\n        Output shares the same memory storage as `past`.\n        \"\"\"\n        return tuple(\n            (\n                layer_past[0].index_select(1, beam_idx.to(layer_past[0].device)),\n                layer_past[1].index_select(1, beam_idx.to(layer_past[1].device)),\n            )\n            for layer_past in past\n        )\n\n    def process_response(self, response):\n        response = response.strip()\n        response = response.replace(\"[[训练时间]]\", \"2023年\")\n        return response\n\n    def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = None):\n        prompt = \"\"\n        for i, (old_query, response) in enumerate(history):\n            prompt += \"[Round {}]\\n\\n问：{}\\n\\n答：{}\\n\\n\".format(i + 1, old_query, response)\n        prompt += \"[Round {}]\\n\\n问：{}\\n\\n答：\".format(len(history) + 1, query)\n        inputs = tokenizer([prompt], return_tensors=\"pt\")\n        inputs = inputs.to(self.device)\n        return inputs\n\n    def build_stream_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = None):\n        if history:\n            prompt = \"\\n\\n[Round {}]\\n\\n问：{}\\n\\n答：\".format(len(history) + 1, query)\n            input_ids = tokenizer.encode(prompt, add_special_tokens=False)\n            input_ids = input_ids[1:]\n            inputs = tokenizer.batch_encode_plus([(input_ids, None)], return_tensors=\"pt\", add_special_tokens=False)\n        else:\n            prompt = \"[Round {}]\\n\\n问：{}\\n\\n答：\".format(len(history) + 1, query)\n            inputs = tokenizer([prompt], return_tensors=\"pt\")\n        inputs = inputs.to(self.device)\n        return inputs\n\n\n    
@torch.no_grad()\n    def chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 8192, num_beams=1,\n             do_sample=True, top_p=0.8, temperature=0.8, logits_processor=None, **kwargs):\n        if history is None:\n            history = []\n        if logits_processor is None:\n            logits_processor = LogitsProcessorList()\n        logits_processor.append(InvalidScoreLogitsProcessor())\n        gen_kwargs = {\"max_length\": max_length, \"num_beams\": num_beams, \"do_sample\": do_sample, \"top_p\": top_p,\n                      \"temperature\": temperature, \"logits_processor\": logits_processor, **kwargs}\n        inputs = self.build_inputs(tokenizer, query, history=history)\n        outputs = self.generate(**inputs, **gen_kwargs)\n        outputs = outputs.tolist()[0][len(inputs[\"input_ids\"][0]):]\n        response = tokenizer.decode(outputs)\n        response = self.process_response(response)\n        history = history + [(query, response)]\n        return response, history\n\n    @torch.no_grad()\n    def stream_chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, past_key_values=None,\n                    max_length: int = 8192, do_sample=True, top_p=0.8, temperature=0.8, logits_processor=None,\n                    return_past_key_values=False, **kwargs):\n        if history is None:\n            history = []\n        if logits_processor is None:\n            logits_processor = LogitsProcessorList()\n        logits_processor.append(InvalidScoreLogitsProcessor())\n        gen_kwargs = {\"max_length\": max_length, \"do_sample\": do_sample, \"top_p\": top_p,\n                      \"temperature\": temperature, \"logits_processor\": logits_processor, **kwargs}\n        if past_key_values is None and not return_past_key_values:\n            inputs = self.build_inputs(tokenizer, query, history=history)\n        else:\n            inputs = self.build_stream_inputs(tokenizer, query, history=history)\n        if past_key_values is not None:\n            past_length = past_key_values[0][0].shape[0]\n            inputs.position_ids += past_length\n            attention_mask = inputs.attention_mask\n            attention_mask = torch.cat((attention_mask.new_ones(1, past_length), attention_mask), dim=1)\n            inputs['attention_mask'] = attention_mask\n        for outputs in self.stream_generate(**inputs, past_key_values=past_key_values,\n                                            return_past_key_values=return_past_key_values, **gen_kwargs):\n            if return_past_key_values:\n                outputs, past_key_values = outputs\n            outputs = outputs.tolist()[0][len(inputs[\"input_ids\"][0]):]\n            response = tokenizer.decode(outputs)\n            if response and response[-1] != \"�\":\n                response = self.process_response(response)\n                new_history = history + [(query, response)]\n                if return_past_key_values:\n                    yield response, new_history, past_key_values\n                else:\n                    yield response, new_history\n\n    @torch.no_grad()\n    def stream_generate(\n            self,\n            input_ids,\n            generation_config: Optional[GenerationConfig] = None,\n            logits_processor: Optional[LogitsProcessorList] = None,\n            stopping_criteria: Optional[StoppingCriteriaList] = None,\n            prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,\n            
return_past_key_values=False,\n            **kwargs,\n    ):\n        batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]\n\n        if generation_config is None:\n            generation_config = self.generation_config\n        generation_config = copy.deepcopy(generation_config)\n        model_kwargs = generation_config.update(**kwargs)\n        bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id\n\n        if isinstance(eos_token_id, int):\n            eos_token_id = [eos_token_id]\n\n        has_default_max_length = kwargs.get(\"max_length\") is None and generation_config.max_length is not None\n        if has_default_max_length and generation_config.max_new_tokens is None:\n            warnings.warn(\n                f\"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. \"\n                \"This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we\"\n                \" recommend using `max_new_tokens` to control the maximum length of the generation.\",\n                UserWarning,\n            )\n        elif generation_config.max_new_tokens is not None:\n            generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length\n            if not has_default_max_length:\n                warnings.warn(\n                    f\"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=\"\n                    f\"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. \"\n                    \"Please refer to the documentation for more information. \"\n                    \"(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)\",\n                    UserWarning,\n                )\n\n        if input_ids_seq_length >= generation_config.max_length:\n            input_ids_string = \"decoder_input_ids\" if self.config.is_encoder_decoder else \"input_ids\"\n            logger.warning(\n                f\"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to\"\n                f\" {generation_config.max_length}. This can lead to unexpected behavior. You should consider\"\n                \" increasing `max_new_tokens`.\"\n            )\n\n        # 2. 
Set generation parameters if not already defined\n        logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()\n        stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()\n\n        logits_processor = self._get_logits_processor(\n            generation_config=generation_config,\n            input_ids_seq_length=input_ids_seq_length,\n            encoder_input_ids=input_ids,\n            prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,\n            logits_processor=logits_processor,\n        )\n\n        stopping_criteria = self._get_stopping_criteria(\n            generation_config=generation_config, stopping_criteria=stopping_criteria\n        )\n        logits_warper = self._get_logits_warper(generation_config)\n\n        unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)\n        scores = None\n        while True:\n            model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)\n            # forward pass to get next token\n            outputs = self(\n                **model_inputs,\n                return_dict=True,\n                output_attentions=False,\n                output_hidden_states=False,\n            )\n\n            next_token_logits = outputs.logits[:, -1, :]\n\n            # pre-process distribution\n            next_token_scores = logits_processor(input_ids, next_token_logits)\n            next_token_scores = logits_warper(input_ids, next_token_scores)\n\n            # sample\n            probs = nn.functional.softmax(next_token_scores, dim=-1)\n            if generation_config.do_sample:\n                next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)\n            else:\n                next_tokens = torch.argmax(probs, dim=-1)\n\n            # update generated ids, model inputs, and length for next step\n            input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)\n            model_kwargs = self._update_model_kwargs_for_generation(\n                outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder\n            )\n            unfinished_sequences = unfinished_sequences.mul((sum(next_tokens != i for i in eos_token_id)).long())\n            if return_past_key_values:\n                yield input_ids, outputs.past_key_values\n            else:\n                yield input_ids\n            # stop when each sentence is finished, or if we exceed the maximum length\n            if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):\n                break\n\n    def quantize(self, bits: int, empty_init=False, device=None, **kwargs):\n        if bits == 0:\n            return\n\n        from .quantization import quantize\n\n        if self.quantized:\n            logger.info(\"Already quantized.\")\n            return self\n\n        self.quantized = True\n\n        self.config.quantization_bit = bits\n\n        self.transformer.encoder = quantize(self.transformer.encoder, bits, empty_init=empty_init, device=device,\n                                            **kwargs)\n        return self\n"
  },
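Usage note: `chat` and `stream_chat` above are the conversational entry points of chatglm2/modeling_chatglm.py. A minimal sketch of how they are driven, assuming the weights are published as "THUDM/chatglm2-6b" and a CUDA device is available (both are assumptions of this example, not requirements of the code):

from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).half().cuda()
model = model.eval()

# Blocking call: returns the complete response plus the updated history.
response, history = model.chat(tokenizer, "Hello", history=[])
print(response)

# Streaming call: yields a progressively longer response as tokens arrive.
for response, history in model.stream_chat(tokenizer, "Tell me more", history=history):
    pass
print(response)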
  {
    "path": "chatglm2/quantization.py",
    "content": "from torch.nn import Linear\nfrom torch.nn.parameter import Parameter\n\nimport bz2\nimport torch\nimport base64\nimport ctypes\nfrom transformers.utils import logging\n\nfrom typing import List\nfrom functools import partial\n\nlogger = logging.get_logger(__name__)\n\ntry:\n    from cpm_kernels.kernels.base import LazyKernelCModule, KernelFunction, round_up\n\n    class Kernel:\n        def __init__(self, code: bytes, function_names: List[str]):\n            self.code = code\n            self._function_names = function_names\n            self._cmodule = LazyKernelCModule(self.code)\n\n            for name in self._function_names:\n                setattr(self, name, KernelFunction(self._cmodule, name))\n\n    quantization_code = \"$QlpoOTFBWSZTWU9yuJUAQHN//////////f/n/8/n///n//bt4dTidcVx8X3V9FV/92/v4B7/AD5FBQFAAAChSgKpFCFAFVSigUAAAEKhSgUUqgFBKigqVREQAABQBQIANDTTIGI00BkZBkNGE0A0BkBkGQGRkaNAaAGQNBoGgDIAAYIGTI0DQAQAaGmmQMRpoDIyDIaMJoBoDIDIMgMjI0aA0AMgaDQNAGQAAwQMmRoGgAgA0NNMgYjTQGRkGQ0YTQDQGQGQZAZGRo0BoAZA0GgaAMgABggZMjQNABABoaaZAxGmgMjIMhowmgGgMgMgyAyMjRoDQAyBoNA0AZAADBAyZGgaAAmqU1NEgJqnptU/Sn4jRR6J6epk2pqb1Q/SgAPUGgyNNGjQ2SBpoAZAAGg0NB6mgDIAAAAA2oaApSREBNAARhGiYEaEwU8pvImlP0k2aam1GaGqbFNM1MHpTwmkepmyU9R6nqPKekHqNNPUxNGhp6n6p6QaZ6o9TG1GMqcoV9ly6nRanHlq6zPNbnGZNi6HSug+2nPiZ13XcnFYZW+45W11CumhzYhchOJ2GLLV1OBjBjGf4TptOddTSOcVxhqYZMYwZXZZY00zI1paX5X9J+b+f4e+x43RXSxXPOdquiGpduatGyXneN696M9t4HU2eR5XX/kPhP261NTx3JO1Ow7LyuDmeo9a7d351T1ZxnvnrvYnrXv/hXxPCeuYx2XsNmO003eg9J3Z6U7b23meJ4ri01OdzTk9BNO96brz+qT5nuvvH3ds/G+m/JcG/F2XYuhXlvO+jP7U3XgrzPN/lr8Sf1n6j4j7jZs+s/T0tNaNNYzTs12rxjwztHlnire3Nzc3N1wuBwOBwXBvZfoHpD7rFmR99V5vj3aXza3xdBbXMalubTg/jIv5dfAi54Pdc75j4z412n3Npj3Ld/ENm7a3b/Cod6h/ret1/5vn/C+l+gdslMvgPSLJ8d8q+U66fevYn/tW1chleEtNTGlcHCbLRlq0tHzF5tsbbZZfHjjLgZu42XCuC3NrdjTasZGNzgxPIrGqp7r3p7L2p5XjnpPSmTd5XtzqnB6U87zzg1Ol0zd0zsLszxR6lkxp35u6/teL0L0W922cR7Lu1lpL9CsHirzuM2T+BgsyViT6LHcm0/Vr6U/7LGGyJeqTEjt0PHWhF5mCT7R9mtlDwriYv0Tyr/OxYt6qp5r0mPVT0608TqnqMZaarU2nFwrTzzlrs1ed7z1ux60wyr4ydCaTi3enW8x68x0zU7tXSlcmPSW1mGpWJMg4zmPC2lK96tp0OE80y4MfEvnZj8zGluR6b22ki1Ou9V2nCd9xovcPvcYMZYy0lvN60ScZ45vN6yeCeeXFb1lVjnnCar5fwXwE2bzJ4HI1XVPXfXZMm44GUsMpYsmLB65TuVdm0cl0b+i/wGNN66XjeV7zuPpHcnK/juhhjdfId5jMdE5nN0dGmmm2zZs2cexD5n9p/dY352XsvXHaZNWWsmmS1atjR452nYudzvqv2HMRyvNNnlMcDl3R2+yx2uVrBubTW9icHDVtbNXlZm7jma1rM4VurZZd2y6nUau7ZXZ7bVU+mnoOVxZGMrVmvX60605JwmzGZhhhjTWtaaaMaaGTGmNMZasY0iX8VMUl8eepaIrzGSpemWOQyZORk2bNpjUybMmxqYmknCGCFynutfksaZpjTNMaaatM0xsxcGR0sociNqxNSmhhR1ZJPbsn8qyF0t2qH6iYBclclalbtTTcHTDsPaX6rlnElph2Jyumumtynv2Kk8GI7rsvXbIcJgHJOSaSXnnGaI3m87RtVXJOZ/YtgdTE6Wpha6ZlE8ayXkef1fh602r2WwvfMXtMdLlkfnLFdYYwYso+bWqm7yJqHXZGw2nrS5ZanSYnWlxBxMF1V940K2wdrI7R6OYf7DGGamMmTSbRhlS45xmVOumF1EyPCmHrrN8wwZOOrdNtLeMtzFzDlWnfTBxMk2NaXIZHBYxYLD4w8yju0ao65Vz1OIXoS9dLanwCe1PWrYuWMqf1if1z2k2yYfKJ741PDgno1ZQ8DRqvUny3mNoWTzGO6m1DkrJI8JiR5cSd+vZdGOO8nrMoc5+NDUFsMSXaZJeNlMmGLtJsovOsUp7I9S5VojKxF6bTVEelXqlfJobQr3LozSh2Jk7VcrVMfhXqszGWMzNqGhqZY0OadxkyyMssKugZR0KNFXBHlqwmJgTE/BNVMk6ItJXZMR0H47GpXv/DMOvNkmVuaV1PRfEdxuqc7Hcd+ZV/zTLaRxWk0nl9CdCeM6mn5rstHIBcpiuwmUZXeq81DacHI2rmrZ5SuE5mOZd6LQrZg9mx32TprA8BMo5jKN6yLTCi3WzQaZSuhzTtM1fUTGVpG8Tw+KXI0tjEpiWxtLYynOlktSbVlaI5kxP8TDH8kx50xoxi5KcA4pcja8KWLRlO/Ks6q06ergnvm1ca3Tq8Uw7LTUsmWyctXPWmpitl/uvGcWTGXGuAXDfhqazGmjkxcJW5hMMMMpYsXl2TZYtVOddG3XCarUt6Ptq9CZXSNzyuRzqRZOjsxdBbFVz6OA5HI43r1jityVlVpVkxmOsyaYWE1NTGq1sOVh36mHMcxtSvcy70edG0ZGR3I1Go1GRlV7mWWo1G0ZGRqlvH40l7o4m5xMWLLLYyNjnqc8556mdPqLJ31n/1nWOncxzG1tizrHs/Z+d2vP/B/l8wdJ6rHUn2nbbDq4p6htFtYzMMMTaZis1K5GKzGNmxhmUx2DDlZ/qNnIx41xnaMfCZ
WYaZWtNLTNW8ND4Fw1MyZOCdM428suKG1ehW8TesOydg7J+YYcD4cYR+8dFK6M4E3HM9ZfRNNL+Sn6rsl4DsrDl2HpPCnfxjGXtbZtYys1ttlyJ4T+BvexjGWRjMszK4Jpc77D3GyuVD7q0+G8m9G+2+rGm7cOR2y7FdtY2XUYx/oNlfRYxhMYyYZkyyg55enna9Kt/FFi6GMMwYwdwxWgxGMLKYmUyGExTKMZkMFhkymKuh0NOBNnBu+23LdwDoZYYzGGMxtORaTU1pjTGWTTGGtMrNWUsyyTTLLG1qy2ZjbK2DBllWqxMtBMaYZQmcE7zvvRcTkclUwdkxTaSdyySt/7fpL+T1v516Ji97fwr5JbLu305zMn5+GMTTZ9F+y7ExwmGVfG44yxn3dLv6l5i+Wth1jCrDq21nW9LqvvDzz3Vf3LLH/O/32TJ/erx3bXftO4eF+G956D952K/An4NfvOpjFjExjevP/UmE0fIoZXx6/w6lX/no3D0bLt+ixjieBM6ksRd0yB4Lt2SwYNE+gd1detlZWUnpiZfGfFaK+4PyCa/v18V8X75pe9fLXzp7l3VjF76vWZmHwGz1IZNWT7b8yddJ4q5kyrVdfru6atWc7bVYztL9Jf4GXvT+Y8m9/YsXP6H018a8D4XVOqvfzqeR+6yZOD8dPv0+U7/q5Pl+2dNb0MjzGVH5p6MNQ7cOWvw62U9aHE8DprDek+McLyvDz+te+9Zhq5+YTruufMcWMabqysTmZVWjKPfnK0wyVcrsuhjZRdLkHNvD72b9abriOSGIxiLixMOoalNPXzy+wT/tf+U6HHONfsz+xe8ufHBdQWWGWLA9if0rsnmrxK5LvRZQeWsTCsrmOYy8VteVfuRfcVTtDLItLIsMYxZLdU/DbtSemxF6Z6Zo5WBXE4tFdCyVMMXMTEMZXVlS6Xec2T4e0tHsRcEuWshcJ2YsNF5rUx1E8ifCq6Z+ZP7qdCeu/aTwFd53l16/o0NOw6O3dLavP4Hbi4RdmuDk6DoYaninC0+o4uZjbJ7Rxeu0/FbuFg+q7DVS6fQe0rZ6NDGUNNU6DEqOaLTicKnYZMnBWruljQxoaS3dZhocDge0bSTyOvdAbG5hxe2xji7E/L55xX13wWNDi6HCekcFxfCPGxY0MXC+s7afWaMdDyjyr+o8Rudm/NabOZvdl274zH4f5XK9z6On1Pe/K5TdPAslg77BjuO6Y3eO7GqvOPG/stknp1leyvLL0Z7bl9I4noMvLkzytLhWYzrOZzLXCORe028rORzOg4N/L0HlMOQ3Pgmnbb6KczlabORpu980q37TBqRu0/p3PO6234Bl03Ynuz+9W7gnsEcmvYaYY3aMYY0wx3pYd+ujsXauWdaY5Xkbtl23fPzFHiDB/QMo0yFjBllYxTQYYyxkrwn7JufwJ/PfgJ+C83X69ni6zvXcnyXabv0ncbLwsceS+RNlyN2mnneJtX0ngYO0+e+0+UnA+Wch3ji8hj5an4h+i6XBySU4n+R0roVcbw5yvHrmr4Yw8Y7x6c+9POPYHI5HI5HI5HI5HGXGww4nE4nrVyOR8XeqPEO7PLOiukYa3Novk5hV4cdtYZLI93e+uxff2jRo0aNGjRo0aNG1bVtW1dy3m83m8+tQ5ZzHw3nObwOu8La9Rc1dtkdS8A3eTk823tnktXWlxN6Oixe06zrN70Isd9jiOgZFq9yfkPqP/SLhN2Myl8jDM43bl1nbcb4cO57jlh8Jow6pzXZdL4dyODTuuhu77FyO27DdwdRxmvO+O+3N2+BdqyTwLHVczDVY4UPE4O66/ZO2cx1LFzVdSXtF7G4HMbrauOHRw6c8FdZ5m9fHZHYZXfTlZquyynSyTTKke6vcffSD9pzPA/G7n7jxPmuhc1DHMynPMrGL6AdewYmwu5ko+UUyTwrMv27rPH1v1nGqd87+p6N6LU8k3NEng53xXyHS97+44OSg/sy/hn+Se6yfYNjW0/uTgP+PvWYzLMmjhcLB/gGpri6H83/84eUXWT6T9Hsv7785z/7z4icpW+zfXypuR7rx/gMdZb1/wC678pcs8/2a3mDitGHxl9mfPlll5MafWWqxk/eYuTDgcNMzDGWLWvsuglNxs53GtN6uWpktlW1tZZYcuinMMWmnNnJydze3b2Y1McBxrBkXw799izLMZZYyy0TkbsGM4p03S2uVu5s/XXUdSdec6smVxZYYGpVmT8A+8ajuEyV5FatkvVru2x6uxGXXbH4A+jvgP4GMYy3iPLXzq/6z65+E005ey+cwMZD3fZcqc6xpjTFjQ0P3U+e++cPYmTIwj0nrK5NPTfl3WvpfLtXDcb2HQMudYOxFXQBor4L4T6vrOauFctYXJQ++NUWmJe5bmx1jDiZS1dTqWxo4GR8jm3fttpmPHppk9PEyv4/y8/sO07XacOmcqc0x2Vi9BvNJvN5oW8x4mOsydpidRxMYJPx06m1bqPzq9KtK8sxXNXFodD/+MYYaJTLwOhc9brCsV18oOR1i4tXChyTkq4lf4y1Ke+9axjDHqs1mfBbMXuP4Hzi+X7t8vzv7bHerrUPgPCxhjre4fXdfLNtNM+Jd+Zdh8xd8wP87uNPoPgv4W7/5P2BuxfsMabNnMnza+54Pdi5U671GPZY8CehX8Voeoo7FHpkeEc6715FwHZrIrUrHaviPUbPZHND+IhczrP6FcYvhOZ0Di/ETt0OI+YwNWR9r7tpf6WDeZKZDB1+z2IthOl1mPyb5FluvEx9h9d0NnM0Y1XPFkWIsk1WotJ0PBMmkvjvQTd0e71tfeV+8r8lQ/tpzpsmxJ+InrI/dj2UajUajVTUajatRqNRtGo1Go1Go4wjeMpZFMVV9CHbofPraLsJ3JpWV2XOoanCuFky4y3PPNxucK2uKC1Lbdb1eo+m5XomN6HfeZsabHLHRX/K+offtNGGmHWctcVcG44MdSqsOLY9VzX+Zxfxn2HPdWTpzWvkrtJ8M5zorrKcquRytJ5N5DZmcaW02l76nWO+BqPXm1A2Ry/0q71dH/mqrqeFjkYxjEXtsX8qubTk67rGycyqsdm4tZx5D6D5hhi0waaWmiaMP81Yjii5qxPlPuU/GfTL1Y5E6Jyfiq63qTa39A4J0sOGDgO9WF9bOXl0XfPRbsY2bPNKPy1YrFYrFYmRhhlTIyMjJWJYZHXuCXI8OoXsvfljGLFicNifpp2XunoPiG1wtx3p1Tah+/DD66OnVtVXP9rKbVxOnL0tR/rHtqB5UDErUVcl11D4qqvjpOcxX7armUNJB3LpW6bxVvD08e8h3odKKvyCFZBdSh2FVcST9xV3n3T8t1j7Kr9qgrqXg+13Pt5U7JCvFXVIV1YG5lRhkVYZJYYDDD4KOIMoHCp26WS8GB7uBh2zIdgq/PKyInjV2STShuoapUdCpX1yTwqq/z1VvET7Kh5nVPkO8YyxjLt2MaaMmWTLQvx3qnzltnXW0p2jxgbEtSny/Osv8Y9pLMXYoHVPAhkVdWVeODhR6q9/Sxe2liwwZWMVvFXfRkeIDxAe
PUPIrdJ4ey6yquzH+PD/bUOWAu05qVHtFd8rrKHSoeNIOUqrYr3FXyToqfYJgwmJdKpXXOwYYegNNGMzfZPp/t3t/DVs4zjNTN61rRqaWaa4NYbRjTa0tWwy2Y2tGN8ZO8ofNKq4j9SL7I+cSm4/6ovLV5HNXLI0jJidwrtk6ynCaP6Z++GjRlWS3tLeW129Mi9evxU9mtz6s5J3Z7M2ngTgnKvmpomxpaLCzPfmx0JWE+m3NLDDGOX47RctdYYNK5jakdqLkRlI39n590T5zctGSwwZZDJj6kW8XSi6ot2MmWWJ0DUT3nuvebBudScjZ79g8cWJ8av0k+/bE5WKd5MdbFpbDVMxu1DVMmtNZGJvq1mtRbn6M+g/kP0FwDwr7quZs7xosNGpbscyxhhd9TyJyFwbLcxlTasg75vW7TsV5K7ji44XPMMrdoj+Y3rT0Hie62nlYV/pwczzOmdLqLhYkzGMzCZWGMQzGMSsZYY6Di1t4nlJ+Em63mJxrVLxPbYxNEdgc1dU2iOKyoYYWjNrEeHTYybVk0atSa7ehuwsWMWTqn1TrnS6hYsi71d1+s+k+ic70e20fzE/VaTdxT9ZtU4GIXdeNx3X77guYYfpHeTQjaMX6brOu4OY4K7Y2d9mbHarI5ox3p4GpJ2Vd/Tst60f7j999pppjR+Q/Qf8J/VaORs3cji7FfFuN61+ui9s8hix1OCh5KGVV23BPXvZfz3CLyHpix+exi8z/KnCnosY2eunor+cxyPO/xJ0vKey9OvE9VjqaYu0x3Z3jd6o2b1T12D+F8l232lwaaacD5LE8LBxu7WTlbWraWpew8Xexjel3E+wWD4APITdNqR8F3R3T0lunCQ4GaE9R37DxeCYfcHi4xci5ovKfxVs55y2hf+65E/Xdp6jR5nrebTmi5incpkyOjs50JvrZwstbbW6kfuuQw+2mykf/EXNFzxfKTrxew929TR6bWnGL//F3JFOFCQT3K4lQ\"\n\n    kernels = Kernel(\n        bz2.decompress(base64.b64decode(quantization_code)),\n        [\n            \"int4WeightCompression\",\n            \"int4WeightExtractionFloat\",\n            \"int4WeightExtractionHalf\",\n            \"int8WeightExtractionFloat\",\n            \"int8WeightExtractionHalf\",\n        ],\n    )\nexcept Exception as exception:\n    kernels = None\n    logger.warning(\"Failed to load cpm_kernels:\" + str(exception))\n\n\nclass W8A16Linear(torch.autograd.Function):\n    @staticmethod\n    def forward(ctx, inp: torch.Tensor, quant_w: torch.Tensor, scale_w: torch.Tensor, weight_bit_width):\n        ctx.inp_shape = inp.size()\n        ctx.weight_bit_width = weight_bit_width\n        out_features = quant_w.size(0)\n        inp = inp.contiguous().view(-1, inp.size(-1))\n        weight = extract_weight_to_half(quant_w, scale_w, weight_bit_width)\n        ctx.weight_shape = weight.size()\n        output = inp.mm(weight.t())\n        ctx.save_for_backward(inp, quant_w, scale_w)\n        return output.view(*(ctx.inp_shape[:-1] + (out_features,)))\n\n    @staticmethod\n    def backward(ctx, grad_output: torch.Tensor):\n        inp, quant_w, scale_w = ctx.saved_tensors\n        weight = extract_weight_to_half(quant_w, scale_w, ctx.weight_bit_width)\n        grad_output = grad_output.contiguous().view(-1, weight.size(0))\n        grad_input = grad_output.mm(weight)\n        grad_weight = grad_output.t().mm(inp)\n        return grad_input.view(ctx.inp_shape), grad_weight.view(ctx.weight_shape), None, None\n\n\ndef compress_int4_weight(weight: torch.Tensor):  # (n, m)\n    with torch.cuda.device(weight.device):\n        n, m = weight.size(0), weight.size(1)\n        assert m % 2 == 0\n        m = m // 2\n        out = torch.empty(n, m, dtype=torch.int8, device=\"cuda\")\n        stream = torch.cuda.current_stream()\n\n        gridDim = (n, 1, 1)\n        blockDim = (min(round_up(m, 32), 1024), 1, 1)\n\n        kernels.int4WeightCompression(\n            gridDim,\n            blockDim,\n            0,\n            stream,\n            [ctypes.c_void_p(weight.data_ptr()), ctypes.c_void_p(out.data_ptr()), ctypes.c_int32(n), ctypes.c_int32(m)],\n        )\n        return out\n\n\ndef extract_weight_to_half(weight: torch.Tensor, scale_list: torch.Tensor, source_bit_width: int):\n    assert scale_list.dtype in [torch.half, torch.bfloat16]\n    assert weight.dtype in [torch.int8]\n    if source_bit_width == 8:\n        return weight.to(scale_list.dtype) * scale_list[:, None]\n    elif 
source_bit_width == 4:\n        # NOTE: the bfloat16 branch expects an \"int4WeightExtractionBFloat16\" kernel,\n        # which is not among the function names loaded above; only the half path is\n        # covered by the released kernel list.\n        func = (\n            kernels.int4WeightExtractionHalf if scale_list.dtype == torch.half else kernels.int4WeightExtractionBFloat16\n        )\n    else:\n        assert False, \"Unsupported bit-width\"\n\n    with torch.cuda.device(weight.device):\n        n, m = weight.size(0), weight.size(1)\n        out = torch.empty(n, m * (8 // source_bit_width), dtype=scale_list.dtype, device=\"cuda\")\n        stream = torch.cuda.current_stream()\n\n        gridDim = (n, 1, 1)\n        blockDim = (min(round_up(m, 32), 1024), 1, 1)\n\n        func(\n            gridDim,\n            blockDim,\n            0,\n            stream,\n            [\n                ctypes.c_void_p(weight.data_ptr()),\n                ctypes.c_void_p(scale_list.data_ptr()),\n                ctypes.c_void_p(out.data_ptr()),\n                ctypes.c_int32(n),\n                ctypes.c_int32(m),\n            ],\n        )\n        return out\n\n\nclass QuantizedLinear(torch.nn.Module):\n    def __init__(self, weight_bit_width: int, weight, bias=None, device=\"cpu\", dtype=None, empty_init=False, *args,\n                 **kwargs):\n        super().__init__()\n        self.weight_bit_width = weight_bit_width\n\n        # `weight` is required even when empty_init=True: its shape sizes the\n        # uninitialized int8 buffers below.\n        shape = weight.shape\n\n        if empty_init:\n            self.weight = torch.empty(shape[0], shape[1] * weight_bit_width // 8, dtype=torch.int8, device=device)\n            self.weight_scale = torch.empty(shape[0], dtype=dtype, device=device)\n        else:\n            # Per-row symmetric quantization: one scale per output channel.\n            self.weight_scale = weight.abs().max(dim=-1).values / ((2 ** (weight_bit_width - 1)) - 1)\n            self.weight = torch.round(weight / self.weight_scale[:, None]).to(torch.int8)\n            if weight_bit_width == 4:\n                self.weight = compress_int4_weight(self.weight)\n\n        self.weight = Parameter(self.weight.to(device), requires_grad=False)\n        self.weight_scale = Parameter(self.weight_scale.to(device), requires_grad=False)\n        self.bias = Parameter(bias.to(device), requires_grad=False) if bias is not None else None\n\n    def forward(self, input):\n        output = W8A16Linear.apply(input, self.weight, self.weight_scale, self.weight_bit_width)\n        if self.bias is not None:\n            output = output + self.bias\n        return output\n\n\ndef quantize(model, weight_bit_width, empty_init=False, device=None):\n    \"\"\"Replace fp16 linear layers with quantized linear layers\"\"\"\n    for layer in model.layers:\n        layer.self_attention.query_key_value = QuantizedLinear(\n            weight_bit_width=weight_bit_width,\n            weight=layer.self_attention.query_key_value.weight.to(torch.cuda.current_device()),\n            bias=layer.self_attention.query_key_value.bias,\n            dtype=layer.self_attention.query_key_value.weight.dtype,\n            device=layer.self_attention.query_key_value.weight.device if device is None else device,\n            empty_init=empty_init\n        )\n        layer.self_attention.dense = QuantizedLinear(\n            weight_bit_width=weight_bit_width,\n            weight=layer.self_attention.dense.weight.to(torch.cuda.current_device()),\n            bias=layer.self_attention.dense.bias,\n            dtype=layer.self_attention.dense.weight.dtype,\n            device=layer.self_attention.dense.weight.device if device is None else device,\n            empty_init=empty_init\n        )\n        layer.mlp.dense_h_to_4h = QuantizedLinear(\n            weight_bit_width=weight_bit_width,\n            weight=layer.mlp.dense_h_to_4h.weight.to(torch.cuda.current_device()),\n            bias=layer.mlp.dense_h_to_4h.bias,\n            dtype=layer.mlp.dense_h_to_4h.weight.dtype,\n            device=layer.mlp.dense_h_to_4h.weight.device if device is None else device,\n            empty_init=empty_init\n        )\n        layer.mlp.dense_4h_to_h = QuantizedLinear(\n            weight_bit_width=weight_bit_width,\n            weight=layer.mlp.dense_4h_to_h.weight.to(torch.cuda.current_device()),\n            bias=layer.mlp.dense_4h_to_h.bias,\n            dtype=layer.mlp.dense_4h_to_h.weight.dtype,\n            device=layer.mlp.dense_4h_to_h.weight.device if device is None else device,\n            empty_init=empty_init\n        )\n\n    return model\n"
  },
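The arithmetic behind QuantizedLinear above is per-row (per-output-channel) symmetric quantization; the cpm_kernels CUDA code only accelerates int4 packing and extraction. A pure-PyTorch sketch of the int8 case, with illustrative function names that are not part of the module:

import torch

def quantize_per_row(weight: torch.Tensor, bits: int = 8):
    # One scale per output row, mapping the row maximum onto the integer
    # range [-(2**(bits-1) - 1), 2**(bits-1) - 1] ([-127, 127] for int8).
    scale = weight.abs().max(dim=-1).values / (2 ** (bits - 1) - 1)
    q = torch.round(weight / scale[:, None]).to(torch.int8)
    return q, scale

def dequantize_per_row(q: torch.Tensor, scale: torch.Tensor) -> torch.Tensor:
    # Mirrors the source_bit_width == 8 branch of extract_weight_to_half.
    return q.to(scale.dtype) * scale[:, None]

w = torch.randn(4, 8)
q, scale = quantize_per_row(w)
print((dequantize_per_row(q, scale) - w).abs().max())  # small round-off error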
  {
    "path": "chatglm2/tokenization_chatglm.py",
    "content": "import os\nimport torch\nfrom typing import List, Optional, Union, Dict\nfrom sentencepiece import SentencePieceProcessor\nfrom transformers import PreTrainedTokenizer\nfrom transformers.utils import logging, PaddingStrategy\nfrom transformers.tokenization_utils_base import EncodedInput, BatchEncoding\n\n\nclass SPTokenizer:\n    def __init__(self, model_path: str):\n        # reload tokenizer\n        assert os.path.isfile(model_path), model_path\n        self.sp_model = SentencePieceProcessor(model_file=model_path)\n\n        # BOS / EOS token IDs\n        self.n_words: int = self.sp_model.vocab_size()\n        self.bos_id: int = self.sp_model.bos_id()\n        self.eos_id: int = self.sp_model.eos_id()\n        self.pad_id: int = self.sp_model.eos_id()\n        assert self.sp_model.vocab_size() == self.sp_model.get_piece_size()\n\n        special_tokens = [\"[MASK]\", \"[gMASK]\", \"[sMASK]\", \"sop\", \"eop\"]\n        self.special_tokens = {}\n        self.index_special_tokens = {}\n        for token in special_tokens:\n            self.special_tokens[token] = self.n_words\n            self.index_special_tokens[self.n_words] = token\n            self.n_words += 1\n\n    def tokenize(self, s: str):\n        return self.sp_model.EncodeAsPieces(s)\n\n    def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]:\n        assert type(s) is str\n        t = self.sp_model.encode(s)\n        if bos:\n            t = [self.bos_id] + t\n        if eos:\n            t = t + [self.eos_id]\n        return t\n\n    def decode(self, t: List[int]) -> str:\n        return self.sp_model.decode(t)\n\n    def decode_tokens(self, tokens: List[str]) -> str:\n        text = self.sp_model.DecodePieces(tokens)\n        return text\n\n    def convert_token_to_id(self, token):\n        \"\"\" Converts a token (str) in an id using the vocab. 
\"\"\"\n        if token in self.special_tokens:\n            return self.special_tokens[token]\n        return self.sp_model.PieceToId(token)\n\n    def convert_id_to_token(self, index):\n        \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n        if index in self.index_special_tokens:\n            return \"\"\n        return self.sp_model.IdToPiece(index)\n\n\nclass ChatGLMTokenizer(PreTrainedTokenizer):\n    vocab_files_names = {\"vocab_file\": \"tokenizer.model\"}\n\n    model_input_names = [\"input_ids\", \"attention_mask\", \"position_ids\"]\n\n    def __init__(self, vocab_file, padding_side=\"left\", **kwargs):\n        super().__init__(padding_side=padding_side, **kwargs)\n        self.name = \"GLMTokenizer\"\n\n        self.vocab_file = vocab_file\n        self.tokenizer = SPTokenizer(vocab_file)\n        self.special_tokens = {\n            \"<bos>\": self.tokenizer.bos_id,\n            \"<eos>\": self.tokenizer.eos_id,\n            \"<pad>\": self.tokenizer.pad_id\n        }\n\n    def get_command(self, token):\n        if token in self.special_tokens:\n            return self.special_tokens[token]\n        assert token in self.tokenizer.special_tokens, f\"{token} is not a special token for {self.name}\"\n        return self.tokenizer.special_tokens[token]\n\n    @property\n    def pad_token(self) -> str:\n        return \"</s>\"\n\n    @property\n    def pad_token_id(self):\n        return self.get_command(\"<pad>\")\n\n    @property\n    def vocab_size(self):\n        return self.tokenizer.n_words\n\n    def get_vocab(self):\n        \"\"\" Returns vocab as a dict \"\"\"\n        vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}\n        vocab.update(self.added_tokens_encoder)\n        return vocab\n\n    def _tokenize(self, text, **kwargs):\n        return self.tokenizer.tokenize(text)\n\n    def _convert_token_to_id(self, token):\n        \"\"\" Converts a token (str) in an id using the vocab. 
\"\"\"\n        return self.tokenizer.convert_token_to_id(token)\n\n    def _convert_id_to_token(self, index):\n        \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n        return self.tokenizer.convert_id_to_token(index)\n\n    def convert_tokens_to_string(self, tokens: List[str]) -> str:\n        return self.tokenizer.decode_tokens(tokens)\n\n    def save_vocabulary(self, save_directory, filename_prefix=None):\n        \"\"\"\n        Save the vocabulary and special tokens file to a directory.\n\n        Args:\n            save_directory (`str`):\n                The directory in which to save the vocabulary.\n            filename_prefix (`str`, *optional*):\n                An optional prefix to add to the named of the saved files.\n\n        Returns:\n            `Tuple(str)`: Paths to the files saved.\n        \"\"\"\n        if os.path.isdir(save_directory):\n            vocab_file = os.path.join(\n                save_directory, self.vocab_files_names[\"vocab_file\"]\n            )\n        else:\n            vocab_file = save_directory\n\n        with open(self.vocab_file, 'rb') as fin:\n            proto_str = fin.read()\n\n        with open(vocab_file, \"wb\") as writer:\n            writer.write(proto_str)\n\n        return (vocab_file,)\n\n    def get_prefix_tokens(self):\n        prefix_tokens = [self.get_command(\"[gMASK]\"), self.get_command(\"sop\")]\n        return prefix_tokens\n\n    def build_inputs_with_special_tokens(\n            self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None\n    ) -> List[int]:\n        \"\"\"\n        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\n        adding special tokens. A BERT sequence has the following format:\n\n        - single sequence: `[CLS] X [SEP]`\n        - pair of sequences: `[CLS] A [SEP] B [SEP]`\n\n        Args:\n            token_ids_0 (`List[int]`):\n                List of IDs to which the special tokens will be added.\n            token_ids_1 (`List[int]`, *optional*):\n                Optional second list of IDs for sequence pairs.\n\n        Returns:\n            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.\n        \"\"\"\n        prefix_tokens = self.get_prefix_tokens()\n        token_ids_0 = prefix_tokens + token_ids_0\n        if token_ids_1 is not None:\n            token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command(\"<eos>\")]\n        return token_ids_0\n\n    def _pad(\n            self,\n            encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],\n            max_length: Optional[int] = None,\n            padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,\n            pad_to_multiple_of: Optional[int] = None,\n            return_attention_mask: Optional[bool] = None,\n    ) -> dict:\n        \"\"\"\n        Pad encoded inputs (on left/right and up to predefined length or max length in the batch)\n\n        Args:\n            encoded_inputs:\n                Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).\n            max_length: maximum length of the returned list and optionally padding length (see below).\n                Will truncate by taking into account the special tokens.\n            padding_strategy: PaddingStrategy to use for padding.\n\n                - PaddingStrategy.LONGEST Pad to the longest sequence in the batch\n                - 
PaddingStrategy.MAX_LENGTH: Pad to the max length (default)\n                - PaddingStrategy.DO_NOT_PAD: Do not pad\n                The tokenizer padding sides are defined in self.padding_side:\n\n                    - 'left': pads on the left of the sequences\n                    - 'right': pads on the right of the sequences\n            pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.\n                This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability\n                `>= 7.5` (Volta).\n            return_attention_mask:\n                (optional) Set to False to avoid returning attention mask (default: set to model specifics)\n        \"\"\"\n        # Load from model defaults\n        assert self.padding_side == \"left\"\n\n        required_input = encoded_inputs[self.model_input_names[0]]\n        seq_length = len(required_input)\n\n        if padding_strategy == PaddingStrategy.LONGEST:\n            max_length = len(required_input)\n\n        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):\n            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of\n\n        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length\n\n        # Initialize attention mask if not present.\n        if \"attention_mask\" not in encoded_inputs:\n            encoded_inputs[\"attention_mask\"] = [1] * seq_length\n\n        if \"position_ids\" not in encoded_inputs:\n            encoded_inputs[\"position_ids\"] = list(range(seq_length))\n\n        if needs_to_be_padded:\n            difference = max_length - len(required_input)\n\n            if \"attention_mask\" in encoded_inputs:\n                encoded_inputs[\"attention_mask\"] = [0] * difference + encoded_inputs[\"attention_mask\"]\n            if \"position_ids\" in encoded_inputs:\n                encoded_inputs[\"position_ids\"] = [0] * difference + encoded_inputs[\"position_ids\"]\n            encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input\n\n        return encoded_inputs\n"
  },
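ChatGLMTokenizer pads exclusively on the left (note the assert in `_pad`), giving pad slots zeros in both attention_mask and position_ids. A self-contained sketch of that scheme; the token ids and pad_token_id=0 are illustrative placeholders:

def pad_left(input_ids, max_length, pad_token_id=0):
    # Mirrors ChatGLMTokenizer._pad: padding goes on the LEFT, and each pad
    # position receives attention_mask=0 and position_ids=0.
    diff = max_length - len(input_ids)
    return {
        "input_ids": [pad_token_id] * diff + input_ids,
        "attention_mask": [0] * diff + [1] * len(input_ids),
        "position_ids": [0] * diff + list(range(len(input_ids))),
    }

print(pad_left([64790, 64792, 30910], max_length=5))
# {'input_ids': [0, 0, 64790, 64792, 30910], 'attention_mask': [0, 0, 1, 1, 1], 'position_ids': [0, 0, 0, 1, 2]}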
  {
    "path": "chatglm3/configuration_chatglm.py",
    "content": "from transformers import PretrainedConfig\n\n\nclass ChatGLMConfig(PretrainedConfig):\n    model_type = \"chatglm\"\n    def __init__(\n        self,\n        num_layers=28,\n        padded_vocab_size=65024,\n        hidden_size=4096,\n        ffn_hidden_size=13696,\n        kv_channels=128,\n        num_attention_heads=32,\n        seq_length=2048,\n        hidden_dropout=0.0,\n        classifier_dropout=None,\n        attention_dropout=0.0,\n        layernorm_epsilon=1e-5,\n        rmsnorm=True,\n        apply_residual_connection_post_layernorm=False,\n        post_layer_norm=True,\n        add_bias_linear=False,\n        add_qkv_bias=False,\n        bias_dropout_fusion=True,\n        multi_query_attention=False,\n        multi_query_group_num=1,\n        apply_query_key_layer_scaling=True,\n        attention_softmax_in_fp32=True,\n        fp32_residual_connection=False,\n        quantization_bit=0,\n        pre_seq_len=None,\n        prefix_projection=False,\n        **kwargs\n    ):\n        self.num_layers = num_layers\n        self.vocab_size = padded_vocab_size\n        self.padded_vocab_size = padded_vocab_size\n        self.hidden_size = hidden_size\n        self.ffn_hidden_size = ffn_hidden_size\n        self.kv_channels = kv_channels\n        self.num_attention_heads = num_attention_heads\n        self.seq_length = seq_length\n        self.hidden_dropout = hidden_dropout\n        self.classifier_dropout = classifier_dropout\n        self.attention_dropout = attention_dropout\n        self.layernorm_epsilon = layernorm_epsilon\n        self.rmsnorm = rmsnorm\n        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm\n        self.post_layer_norm = post_layer_norm\n        self.add_bias_linear = add_bias_linear\n        self.add_qkv_bias = add_qkv_bias\n        self.bias_dropout_fusion = bias_dropout_fusion\n        self.multi_query_attention = multi_query_attention\n        self.multi_query_group_num = multi_query_group_num\n        self.apply_query_key_layer_scaling = apply_query_key_layer_scaling\n        self.attention_softmax_in_fp32 = attention_softmax_in_fp32\n        self.fp32_residual_connection = fp32_residual_connection\n        self.quantization_bit = quantization_bit\n        self.pre_seq_len = pre_seq_len\n        self.prefix_projection = prefix_projection\n        super().__init__(**kwargs)"
  },
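The defaults in ChatGLMConfig determine the attention shapes used by the modeling file that follows: with multi-query attention, the K/V projection shrinks from num_attention_heads down to multi_query_group_num head groups. A sketch of that bookkeeping; the import path assumes the chatglm3/ layout of this repo, and the group count of 2 is only an example value:

from chatglm3.configuration_chatglm import ChatGLMConfig

cfg = ChatGLMConfig(multi_query_attention=True, multi_query_group_num=2)

# Mirrors SelfAttention.__init__ in chatglm3/modeling_chatglm.py.
head_dim = cfg.kv_channels                             # 128
projection_size = cfg.num_attention_heads * head_dim   # 32 * 128 = 4096 (query width)
kv_width = cfg.multi_query_group_num * head_dim        # 2 * 128 = 256 (shared K/V width)
qkv_hidden_size = projection_size + 2 * kv_width       # 4608, vs. 3 * 4096 without MQA
print(projection_size, kv_width, qkv_hidden_size)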
  {
    "path": "chatglm3/modeling_chatglm.py",
    "content": "\"\"\" PyTorch ChatGLM model. \"\"\"\n\nimport math\nimport copy\nimport warnings\nimport re\nimport sys\n\nimport torch\nimport torch.utils.checkpoint\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss, LayerNorm, MSELoss, BCEWithLogitsLoss\nfrom torch.nn.utils import skip_init\nfrom typing import Optional, Tuple, Union, List, Callable, Dict, Any\nfrom copy import deepcopy\n\nfrom transformers.modeling_outputs import (\n    BaseModelOutputWithPast,\n    CausalLMOutputWithPast,\n    SequenceClassifierOutputWithPast,\n)\nfrom transformers.modeling_utils import PreTrainedModel\nfrom transformers.utils import logging\nfrom transformers.generation.logits_process import LogitsProcessor\nfrom transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig, ModelOutput\n\nfrom .configuration_chatglm import ChatGLMConfig\n\n# flags required to enable jit fusion kernels\n\nif sys.platform != 'darwin':\n    torch._C._jit_set_profiling_mode(False)\n    torch._C._jit_set_profiling_executor(False)\n    torch._C._jit_override_can_fuse_on_cpu(True)\n    torch._C._jit_override_can_fuse_on_gpu(True)\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"THUDM/ChatGLM\"\n_CONFIG_FOR_DOC = \"ChatGLMConfig\"\n\nCHATGLM_6B_PRETRAINED_MODEL_ARCHIVE_LIST = [\n    \"THUDM/chatglm3-6b\",\n    # See all ChatGLM models at https://huggingface.co/models?filter=chatglm\n]\n\n\ndef default_init(cls, *args, **kwargs):\n    return cls(*args, **kwargs)\n\n\nclass InvalidScoreLogitsProcessor(LogitsProcessor):\n    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:\n        if torch.isnan(scores).any() or torch.isinf(scores).any():\n            scores.zero_()\n            scores[..., 5] = 5e4\n        return scores\n\n\nclass PrefixEncoder(torch.nn.Module):\n    \"\"\"\n    The torch.nn model to encode the prefix\n    Input shape: (batch-size, prefix-length)\n    Output shape: (batch-size, prefix-length, 2*layers*hidden)\n    \"\"\"\n\n    def __init__(self, config: ChatGLMConfig):\n        super().__init__()\n        self.prefix_projection = config.prefix_projection\n        if self.prefix_projection:\n            # Use a two-layer MLP to encode the prefix\n            kv_size = config.num_layers * config.kv_channels * config.multi_query_group_num * 2\n            self.embedding = torch.nn.Embedding(config.pre_seq_len, kv_size)\n            self.trans = torch.nn.Sequential(\n                torch.nn.Linear(kv_size, config.hidden_size),\n                torch.nn.Tanh(),\n                torch.nn.Linear(config.hidden_size, kv_size)\n            )\n        else:\n            self.embedding = torch.nn.Embedding(config.pre_seq_len,\n                                                config.num_layers * config.kv_channels * config.multi_query_group_num * 2)\n\n    def forward(self, prefix: torch.Tensor):\n        if self.prefix_projection:\n            prefix_tokens = self.embedding(prefix)\n            past_key_values = self.trans(prefix_tokens)\n        else:\n            past_key_values = self.embedding(prefix)\n        return past_key_values\n\n\ndef split_tensor_along_last_dim(\n        tensor: torch.Tensor,\n        num_partitions: int,\n        contiguous_split_chunks: bool = False,\n) -> List[torch.Tensor]:\n    \"\"\"Split a tensor along its last dimension.\n\n    Arguments:\n        tensor: input tensor.\n        num_partitions: number of partitions to split the tensor\n     
   contiguous_split_chunks: If True, make each chunk contiguous\n                                 in memory.\n\n    Returns:\n        A list of Tensors\n    \"\"\"\n    # Get the size and dimension.\n    last_dim = tensor.dim() - 1\n    last_dim_size = tensor.size()[last_dim] // num_partitions\n    # Split.\n    tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)\n    # Note: torch.split does not create contiguous tensors by default.\n    if contiguous_split_chunks:\n        return tuple(chunk.contiguous() for chunk in tensor_list)\n\n    return tensor_list\n\n\nclass RotaryEmbedding(nn.Module):\n    def __init__(self, dim, original_impl=False, device=None, dtype=None):\n        super().__init__()\n        inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, device=device).to(dtype=dtype) / dim))\n        self.register_buffer(\"inv_freq\", inv_freq)\n        self.dim = dim\n        self.original_impl = original_impl\n\n    def forward_impl(\n            self, seq_len: int, n_elem: int, dtype: torch.dtype, device: torch.device, base: int = 10000\n    ):\n        \"\"\"Enhanced Transformer with Rotary Position Embedding.\n\n        Derived from: https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/\n        transformers/rope/__init__.py. MIT License:\n        https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/license.\n        \"\"\"\n        # $\\Theta = {\\theta_i = 10000^{\\frac{2(i-1)}{d}}, i \\in [1, 2, ..., \\frac{d}{2}]}$\n        theta = 1.0 / (base ** (torch.arange(0, n_elem, 2, dtype=torch.float, device=device) / n_elem))\n\n        # Create position indexes `[0, 1, ..., seq_len - 1]`\n        seq_idx = torch.arange(seq_len, dtype=torch.float, device=device)\n\n        # Calculate the product of position index and $\\theta_i$\n        idx_theta = torch.outer(seq_idx, theta).float()\n\n        cache = torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1)\n\n        # this is to mimic the behaviour of complex32, else we will get different results\n        if dtype in (torch.float16, torch.bfloat16, torch.int8):\n            cache = cache.bfloat16() if dtype == torch.bfloat16 else cache.half()\n        return cache\n\n    def forward(self, max_seq_len, offset=0):\n        return self.forward_impl(\n            max_seq_len, self.dim, dtype=self.inv_freq.dtype, device=self.inv_freq.device\n        )\n\n\n@torch.jit.script\ndef apply_rotary_pos_emb(x: torch.Tensor, rope_cache: torch.Tensor) -> torch.Tensor:\n    # x: [sq, b, np, hn]\n    sq, b, np, hn = x.size(0), x.size(1), x.size(2), x.size(3)\n    rot_dim = rope_cache.shape[-2] * 2\n    x, x_pass = x[..., :rot_dim], x[..., rot_dim:]\n    # truncate to support variable sizes\n    rope_cache = rope_cache[:sq]\n    xshaped = x.reshape(sq, -1, np, rot_dim // 2, 2)\n    rope_cache = rope_cache.view(sq, -1, 1, xshaped.size(3), 2)\n    x_out2 = torch.stack(\n        [\n            xshaped[..., 0] * rope_cache[..., 0] - xshaped[..., 1] * rope_cache[..., 1],\n            xshaped[..., 1] * rope_cache[..., 0] + xshaped[..., 0] * rope_cache[..., 1],\n        ],\n        -1,\n    )\n    x_out2 = x_out2.flatten(3)\n    return torch.cat((x_out2, x_pass), dim=-1)\n\n\nclass RMSNorm(torch.nn.Module):\n    def __init__(self, normalized_shape, eps=1e-5, device=None, dtype=None, **kwargs):\n        super().__init__()\n        self.weight = torch.nn.Parameter(torch.empty(normalized_shape, device=device, dtype=dtype))\n        self.eps = eps\n\n    def 
forward(self, hidden_states: torch.Tensor):\n        input_dtype = hidden_states.dtype\n        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)\n        hidden_states = hidden_states * torch.rsqrt(variance + self.eps)\n\n        return (self.weight * hidden_states).to(input_dtype)\n\n\nclass CoreAttention(torch.nn.Module):\n    def __init__(self, config: ChatGLMConfig, layer_number):\n        super(CoreAttention, self).__init__()\n\n        self.apply_query_key_layer_scaling = config.apply_query_key_layer_scaling\n        self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32\n        if self.apply_query_key_layer_scaling:\n            self.attention_softmax_in_fp32 = True\n        self.layer_number = max(1, layer_number)\n\n        projection_size = config.kv_channels * config.num_attention_heads\n\n        # Per attention head and per partition values.\n        self.hidden_size_per_partition = projection_size\n        self.hidden_size_per_attention_head = projection_size // config.num_attention_heads\n        self.num_attention_heads_per_partition = config.num_attention_heads\n\n        coeff = None\n        self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)\n        if self.apply_query_key_layer_scaling:\n            coeff = self.layer_number\n            self.norm_factor *= coeff\n        self.coeff = coeff\n\n        self.attention_dropout = torch.nn.Dropout(config.attention_dropout)\n\n    def forward(self, query_layer, key_layer, value_layer, attention_mask):\n        pytorch_major_version = int(torch.__version__.split('.')[0])\n        if pytorch_major_version >= 2:\n            query_layer, key_layer, value_layer = [k.permute(1, 2, 0, 3) for k in [query_layer, key_layer, value_layer]]\n            if attention_mask is None and query_layer.shape[2] == key_layer.shape[2]:\n                context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer,\n                                                                                 is_causal=True)\n            else:\n                if attention_mask is not None:\n                    attention_mask = ~attention_mask\n                context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer,\n                                                                                 attention_mask)\n            context_layer = context_layer.permute(2, 0, 1, 3)\n            new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)\n            context_layer = context_layer.reshape(*new_context_layer_shape)\n        else:\n            # Raw attention scores\n\n            # [b, np, sq, sk]\n            output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0))\n\n            # [sq, b, np, hn] -> [sq, b * np, hn]\n            query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1)\n            # [sk, b, np, hn] -> [sk, b * np, hn]\n            key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)\n\n            # preallocting input tensor: [b * np, sq, sk]\n            matmul_input_buffer = torch.empty(\n                output_size[0] * output_size[1], output_size[2], output_size[3], dtype=query_layer.dtype,\n                device=query_layer.device\n            )\n\n            # Raw attention scores. 
[b * np, sq, sk]\n            matmul_result = torch.baddbmm(\n                matmul_input_buffer,\n                query_layer.transpose(0, 1),  # [b * np, sq, hn]\n                key_layer.transpose(0, 1).transpose(1, 2),  # [b * np, hn, sk]\n                beta=0.0,\n                alpha=(1.0 / self.norm_factor),\n            )\n\n            # change view to [b, np, sq, sk]\n            attention_scores = matmul_result.view(*output_size)\n\n            # ===========================\n            # Attention probs and dropout\n            # ===========================\n\n            # attention scores and attention mask [b, np, sq, sk]\n            if self.attention_softmax_in_fp32:\n                attention_scores = attention_scores.float()\n            if self.coeff is not None:\n                attention_scores = attention_scores * self.coeff\n            if attention_mask is None and attention_scores.shape[2] == attention_scores.shape[3]:\n                attention_mask = torch.ones(output_size[0], 1, output_size[2], output_size[3],\n                                            device=attention_scores.device, dtype=torch.bool)\n                attention_mask.tril_()\n                attention_mask = ~attention_mask\n            if attention_mask is not None:\n                attention_scores = attention_scores.masked_fill(attention_mask, float(\"-inf\"))\n            attention_probs = F.softmax(attention_scores, dim=-1)\n            attention_probs = attention_probs.type_as(value_layer)\n\n            # This is actually dropping out entire tokens to attend to, which might\n            # seem a bit unusual, but is taken from the original Transformer paper.\n            attention_probs = self.attention_dropout(attention_probs)\n            # =========================\n            # Context layer. 
[sq, b, hp]\n            # =========================\n\n            # value_layer -> context layer.\n            # [sk, b, np, hn] --> [b, np, sq, hn]\n\n            # context layer shape: [b, np, sq, hn]\n            output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3))\n            # change view [sk, b * np, hn]\n            value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1)\n            # change view [b * np, sq, sk]\n            attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)\n            # matmul: [b * np, sq, hn]\n            context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))\n            # change view [b, np, sq, hn]\n            context_layer = context_layer.view(*output_size)\n            # [b, np, sq, hn] --> [sq, b, np, hn]\n            context_layer = context_layer.permute(2, 0, 1, 3).contiguous()\n            # [sq, b, np, hn] --> [sq, b, hp]\n            new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)\n            context_layer = context_layer.view(*new_context_layer_shape)\n\n        return context_layer\n\n\nclass SelfAttention(torch.nn.Module):\n    \"\"\"Parallel self-attention layer abstract class.\n\n    Self-attention layer takes input with size [s, b, h]\n    and returns output of the same size.\n    \"\"\"\n\n    def __init__(self, config: ChatGLMConfig, layer_number, device=None):\n        super(SelfAttention, self).__init__()\n        self.layer_number = max(1, layer_number)\n\n        self.projection_size = config.kv_channels * config.num_attention_heads\n\n        # Per attention head and per partition values.\n        self.hidden_size_per_attention_head = self.projection_size // config.num_attention_heads\n        self.num_attention_heads_per_partition = config.num_attention_heads\n\n        self.multi_query_attention = config.multi_query_attention\n        self.qkv_hidden_size = 3 * self.projection_size\n        if self.multi_query_attention:\n            self.num_multi_query_groups_per_partition = config.multi_query_group_num\n            self.qkv_hidden_size = (\n                    self.projection_size + 2 * self.hidden_size_per_attention_head * config.multi_query_group_num\n            )\n        self.query_key_value = nn.Linear(config.hidden_size, self.qkv_hidden_size,\n                                         bias=config.add_bias_linear or config.add_qkv_bias,\n                                         device=device, **_config_to_kwargs(config)\n                                         )\n\n        self.core_attention = CoreAttention(config, self.layer_number)\n\n        # Output.\n        self.dense = nn.Linear(self.projection_size, config.hidden_size, bias=config.add_bias_linear,\n                               device=device, **_config_to_kwargs(config)\n                               )\n\n    def _allocate_memory(self, inference_max_sequence_len, batch_size, device=None, dtype=None):\n        if self.multi_query_attention:\n            num_attention_heads = self.num_multi_query_groups_per_partition\n        else:\n            num_attention_heads = self.num_attention_heads_per_partition\n        return torch.empty(\n            inference_max_sequence_len,\n            batch_size,\n            num_attention_heads,\n            self.hidden_size_per_attention_head,\n            dtype=dtype,\n            device=device,\n        )\n\n    def forward(\n            self, 
hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True\n    ):\n        # hidden_states: [sq, b, h]\n\n        # =================================================\n        # Pre-allocate memory for key-values for inference.\n        # =================================================\n        # =====================\n        # Query, Key, and Value\n        # =====================\n\n        # Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)]\n        mixed_x_layer = self.query_key_value(hidden_states)\n\n        if self.multi_query_attention:\n            (query_layer, key_layer, value_layer) = mixed_x_layer.split(\n                [\n                    self.num_attention_heads_per_partition * self.hidden_size_per_attention_head,\n                    self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head,\n                    self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head,\n                ],\n                dim=-1,\n            )\n            query_layer = query_layer.view(\n                query_layer.size()[:-1] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head)\n            )\n            key_layer = key_layer.view(\n                key_layer.size()[:-1] + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)\n            )\n            value_layer = value_layer.view(\n                value_layer.size()[:-1]\n                + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)\n            )\n        else:\n            new_tensor_shape = mixed_x_layer.size()[:-1] + \\\n                               (self.num_attention_heads_per_partition,\n                                3 * self.hidden_size_per_attention_head)\n            mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)\n\n            # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn]\n            (query_layer, key_layer, value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3)\n\n        # apply relative positional encoding (rotary embedding)\n        if rotary_pos_emb is not None:\n            query_layer = apply_rotary_pos_emb(query_layer, rotary_pos_emb)\n            key_layer = apply_rotary_pos_emb(key_layer, rotary_pos_emb)\n\n        # adjust key and value for inference\n        if kv_cache is not None:\n            cache_k, cache_v = kv_cache\n            key_layer = torch.cat((cache_k, key_layer), dim=0)\n            value_layer = torch.cat((cache_v, value_layer), dim=0)\n        if use_cache:\n            kv_cache = (key_layer, value_layer)\n        else:\n            kv_cache = None\n\n        if self.multi_query_attention:\n            key_layer = key_layer.unsqueeze(-2)\n            key_layer = key_layer.expand(\n                -1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1\n            )\n            key_layer = key_layer.contiguous().view(\n                key_layer.size()[:2] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head)\n            )\n            value_layer = value_layer.unsqueeze(-2)\n            value_layer = value_layer.expand(\n                -1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1\n            )\n            value_layer = value_layer.contiguous().view(\n                value_layer.size()[:2] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head)\n            )\n\n    
    # ==================================\n        # core attention computation\n        # ==================================\n\n        context_layer = self.core_attention(query_layer, key_layer, value_layer, attention_mask)\n\n        # =================\n        # Output. [sq, b, h]\n        # =================\n\n        output = self.dense(context_layer)\n\n        return output, kv_cache\n\n\ndef _config_to_kwargs(args):\n    common_kwargs = {\n        \"dtype\": args.torch_dtype,\n    }\n    return common_kwargs\n\n\nclass MLP(torch.nn.Module):\n    \"\"\"MLP.\n\n    MLP will take the input with h hidden state, project it to 4*h\n    hidden dimension, perform nonlinear transformation, and project the\n    state back into h hidden dimension.\n    \"\"\"\n\n    def __init__(self, config: ChatGLMConfig, device=None):\n        super(MLP, self).__init__()\n\n        self.add_bias = config.add_bias_linear\n\n        # Project to 4h. If using swiglu double the output width, see https://arxiv.org/pdf/2002.05202.pdf\n        self.dense_h_to_4h = nn.Linear(\n            config.hidden_size,\n            config.ffn_hidden_size * 2,\n            bias=self.add_bias,\n            device=device,\n            **_config_to_kwargs(config)\n        )\n\n        def swiglu(x):\n            x = torch.chunk(x, 2, dim=-1)\n            return F.silu(x[0]) * x[1]\n\n        self.activation_func = swiglu\n\n        # Project back to h.\n        self.dense_4h_to_h = nn.Linear(\n            config.ffn_hidden_size,\n            config.hidden_size,\n            bias=self.add_bias,\n            device=device,\n            **_config_to_kwargs(config)\n        )\n\n    def forward(self, hidden_states):\n        # [s, b, 4hp]\n        intermediate_parallel = self.dense_h_to_4h(hidden_states)\n        intermediate_parallel = self.activation_func(intermediate_parallel)\n        # [s, b, h]\n        output = self.dense_4h_to_h(intermediate_parallel)\n        return output\n\n\nclass GLMBlock(torch.nn.Module):\n    \"\"\"A single transformer layer.\n\n    Transformer layer takes input with size [s, b, h] and returns an\n    output of the same size.\n    \"\"\"\n\n    def __init__(self, config: ChatGLMConfig, layer_number, device=None):\n        super(GLMBlock, self).__init__()\n        self.layer_number = layer_number\n\n        self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm\n\n        self.fp32_residual_connection = config.fp32_residual_connection\n\n        LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm\n        # Layernorm on the input data.\n        self.input_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,\n                                             dtype=config.torch_dtype)\n\n        # Self attention.\n        self.self_attention = SelfAttention(config, layer_number, device=device)\n        self.hidden_dropout = config.hidden_dropout\n\n        # Layernorm on the attention output\n        self.post_attention_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,\n                                                      dtype=config.torch_dtype)\n\n        # MLP\n        self.mlp = MLP(config, device=device)\n\n    def forward(\n            self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True,\n    ):\n        # hidden_states: [s, b, h]\n\n        # Layer norm at the beginning of the transformer layer.\n        layernorm_output = 
self.input_layernorm(hidden_states)\n        # Self attention.\n        attention_output, kv_cache = self.self_attention(\n            layernorm_output,\n            attention_mask,\n            rotary_pos_emb,\n            kv_cache=kv_cache,\n            use_cache=use_cache\n        )\n\n        # Residual connection.\n        if self.apply_residual_connection_post_layernorm:\n            residual = layernorm_output\n        else:\n            residual = hidden_states\n\n        layernorm_input = torch.nn.functional.dropout(attention_output, p=self.hidden_dropout, training=self.training)\n        layernorm_input = residual + layernorm_input\n\n        # Layer norm post the self attention.\n        layernorm_output = self.post_attention_layernorm(layernorm_input)\n\n        # MLP.\n        mlp_output = self.mlp(layernorm_output)\n\n        # Second residual connection.\n        if self.apply_residual_connection_post_layernorm:\n            residual = layernorm_output\n        else:\n            residual = layernorm_input\n\n        output = torch.nn.functional.dropout(mlp_output, p=self.hidden_dropout, training=self.training)\n        output = residual + output\n\n        return output, kv_cache\n\n\nclass GLMTransformer(torch.nn.Module):\n    \"\"\"Transformer class.\"\"\"\n\n    def __init__(self, config: ChatGLMConfig, device=None):\n        super(GLMTransformer, self).__init__()\n\n        self.fp32_residual_connection = config.fp32_residual_connection\n        self.post_layer_norm = config.post_layer_norm\n\n        # Number of layers.\n        self.num_layers = config.num_layers\n\n        # Transformer layers.\n        def build_layer(layer_number):\n            return GLMBlock(config, layer_number, device=device)\n\n        self.layers = torch.nn.ModuleList([build_layer(i + 1) for i in range(self.num_layers)])\n\n        if self.post_layer_norm:\n            LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm\n            # Final layer norm before output.\n            self.final_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,\n                                                 dtype=config.torch_dtype)\n\n        self.gradient_checkpointing = False\n\n    def _get_layer(self, layer_number):\n        return self.layers[layer_number]\n\n    def forward(\n            self, hidden_states, attention_mask, rotary_pos_emb, kv_caches=None,\n            use_cache: Optional[bool] = True,\n            output_hidden_states: Optional[bool] = False,\n    ):\n        if not kv_caches:\n            kv_caches = [None for _ in range(self.num_layers)]\n        presents = () if use_cache else None\n        if self.gradient_checkpointing and self.training:\n            if use_cache:\n                logger.warning_once(\n                    \"`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...\"\n                )\n                use_cache = False\n\n        all_self_attentions = None\n        all_hidden_states = () if output_hidden_states else None\n        for index in range(self.num_layers):\n            if output_hidden_states:\n                all_hidden_states = all_hidden_states + (hidden_states,)\n\n            layer = self._get_layer(index)\n            if self.gradient_checkpointing and self.training:\n                layer_ret = torch.utils.checkpoint.checkpoint(\n                    layer,\n                    hidden_states,\n                    attention_mask,\n                    rotary_pos_emb,\n                    kv_caches[index],\n                    use_cache\n                )\n            else:\n                layer_ret = layer(\n                    hidden_states,\n                    attention_mask,\n                    rotary_pos_emb,\n                    kv_cache=kv_caches[index],\n                    use_cache=use_cache\n                )\n            hidden_states, kv_cache = layer_ret\n            if use_cache:\n                presents = presents + (kv_cache,)\n\n        if output_hidden_states:\n            all_hidden_states = all_hidden_states + (hidden_states,)\n\n        # Final layer norm.\n        if self.post_layer_norm:\n            hidden_states = self.final_layernorm(hidden_states)\n\n        return hidden_states, presents, all_hidden_states, all_self_attentions\n\n\nclass ChatGLMPreTrainedModel(PreTrainedModel):\n    \"\"\"\n    An abstract class to handle weights initialization and\n    a simple interface for downloading and loading pretrained models.\n    \"\"\"\n\n    is_parallelizable = False\n    supports_gradient_checkpointing = True\n    config_class = ChatGLMConfig\n    base_model_prefix = \"transformer\"\n    _no_split_modules = [\"GLMBlock\"]\n\n    def _init_weights(self, module: nn.Module):\n        \"\"\"Initialize the weights.\"\"\"\n        return\n\n    def get_masks(self, input_ids, past_key_values, padding_mask=None):\n        batch_size, seq_length = input_ids.shape\n        full_attention_mask = torch.ones(batch_size, seq_length, seq_length, device=input_ids.device)\n        full_attention_mask.tril_()\n        past_length = 0\n        if past_key_values:\n            past_length = past_key_values[0][0].shape[0]\n        if past_length:\n            full_attention_mask = torch.cat((torch.ones(batch_size, seq_length, past_length,\n                                                        device=input_ids.device), full_attention_mask), dim=-1)\n        if padding_mask is not None:\n            full_attention_mask = full_attention_mask * padding_mask.unsqueeze(1)\n        if not past_length and padding_mask is not None:\n            full_attention_mask -= padding_mask.unsqueeze(-1) - 1\n        full_attention_mask = (full_attention_mask < 0.5).bool()\n        full_attention_mask.unsqueeze_(1)\n        return full_attention_mask\n\n    def get_position_ids(self, input_ids, device):\n        batch_size, seq_length = input_ids.shape\n        position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1)\n        return position_ids\n\n    def _set_gradient_checkpointing(self, module, value=False):\n        if isinstance(module, GLMTransformer):\n            module.gradient_checkpointing = value\n\n\nclass Embedding(torch.nn.Module):\n    \"\"\"Language model embeddings.\"\"\"\n\n    def __init__(self, config: ChatGLMConfig, device=None):\n        
super(Embedding, self).__init__()\n\n        self.hidden_size = config.hidden_size\n        # Word embeddings (parallel).\n        self.word_embeddings = nn.Embedding(\n            config.padded_vocab_size,\n            self.hidden_size,\n            dtype=config.torch_dtype,\n            device=device\n        )\n        self.fp32_residual_connection = config.fp32_residual_connection\n\n    def forward(self, input_ids):\n        # Embeddings.\n        words_embeddings = self.word_embeddings(input_ids)\n        embeddings = words_embeddings\n        # Change to the sequence-first layout used throughout the encoder: [b, s, h] --> [s, b, h].\n        embeddings = embeddings.transpose(0, 1).contiguous()\n        # If the flag for fp32 residual connection is set, convert to float.\n        if self.fp32_residual_connection:\n            embeddings = embeddings.float()\n        return embeddings\n\n\nclass ChatGLMModel(ChatGLMPreTrainedModel):\n    def __init__(self, config: ChatGLMConfig, device=None, empty_init=True):\n        super().__init__(config)\n        if empty_init:\n            init_method = skip_init\n        else:\n            init_method = default_init\n        init_kwargs = {}\n        if device is not None:\n            init_kwargs[\"device\"] = device\n        self.embedding = init_method(Embedding, config, **init_kwargs)\n        self.num_layers = config.num_layers\n        self.multi_query_group_num = config.multi_query_group_num\n        self.kv_channels = config.kv_channels\n\n        # Rotary positional embeddings\n        self.seq_length = config.seq_length\n        rotary_dim = (\n            config.hidden_size // config.num_attention_heads if config.kv_channels is None else config.kv_channels\n        )\n\n        self.rotary_pos_emb = RotaryEmbedding(rotary_dim // 2, original_impl=config.original_rope, device=device,\n                                              dtype=config.torch_dtype)\n        self.encoder = init_method(GLMTransformer, config, **init_kwargs)\n        self.output_layer = init_method(nn.Linear, config.hidden_size, config.padded_vocab_size, bias=False,\n                                        dtype=config.torch_dtype, **init_kwargs)\n        self.pre_seq_len = config.pre_seq_len\n        self.prefix_projection = config.prefix_projection\n        if self.pre_seq_len is not None:\n            for param in self.parameters():\n                param.requires_grad = False\n            self.prefix_tokens = torch.arange(self.pre_seq_len).long()\n            self.prefix_encoder = PrefixEncoder(config)\n            self.dropout = torch.nn.Dropout(0.1)\n\n    def get_input_embeddings(self):\n        return self.embedding.word_embeddings\n\n    def get_prompt(self, batch_size, device, dtype=torch.half):\n        prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(device)\n        past_key_values = self.prefix_encoder(prefix_tokens).type(dtype)\n        past_key_values = past_key_values.view(\n            batch_size,\n            self.pre_seq_len,\n            self.num_layers * 2,\n            self.multi_query_group_num,\n            self.kv_channels\n        )\n        # The permute/split below yields one (key, value) pair per layer, each of shape\n        # [pre_seq_len, batch, num_groups, kv_channels].\n        past_key_values = self.dropout(past_key_values)\n        past_key_values = past_key_values.permute([2, 1, 0, 3, 4]).split(2)\n        return past_key_values\n\n    def forward(\n            self,\n            input_ids,\n            position_ids: Optional[torch.Tensor] = None,\n            attention_mask: Optional[torch.BoolTensor] = None,\n           
 full_attention_mask: Optional[torch.BoolTensor] = None,\n            past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,\n            inputs_embeds: Optional[torch.Tensor] = None,\n            use_cache: Optional[bool] = None,\n            output_hidden_states: Optional[bool] = None,\n            return_dict: Optional[bool] = None,\n    ):\n        output_hidden_states = (\n            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        )\n        use_cache = use_cache if use_cache is not None else self.config.use_cache\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n        batch_size, seq_length = input_ids.shape\n\n        if inputs_embeds is None:\n            inputs_embeds = self.embedding(input_ids)\n\n        if self.pre_seq_len is not None:\n            if past_key_values is None:\n                past_key_values = self.get_prompt(batch_size=batch_size, device=input_ids.device,\n                                                  dtype=inputs_embeds.dtype)\n            if attention_mask is not None:\n                attention_mask = torch.cat([attention_mask.new_ones((batch_size, self.pre_seq_len)),\n                                            attention_mask], dim=-1)\n\n        if full_attention_mask is None:\n            if (attention_mask is not None and not attention_mask.all()) or (past_key_values and seq_length != 1):\n                full_attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask)\n\n        # Rotary positional embeddings\n        rotary_pos_emb = self.rotary_pos_emb(self.seq_length)\n        if position_ids is not None:\n            rotary_pos_emb = rotary_pos_emb[position_ids]\n        else:\n            rotary_pos_emb = rotary_pos_emb[None, :seq_length]\n        rotary_pos_emb = rotary_pos_emb.transpose(0, 1).contiguous()\n\n        # Run encoder.\n        hidden_states, presents, all_hidden_states, all_self_attentions = self.encoder(\n            inputs_embeds, full_attention_mask, rotary_pos_emb=rotary_pos_emb,\n            kv_caches=past_key_values, use_cache=use_cache, output_hidden_states=output_hidden_states\n        )\n\n        if not return_dict:\n            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)\n\n        return BaseModelOutputWithPast(\n            last_hidden_state=hidden_states,\n            past_key_values=presents,\n            hidden_states=all_hidden_states,\n            attentions=all_self_attentions,\n        )\n\n    def quantize(self, weight_bit_width: int):\n        from .quantization import quantize\n        quantize(self.encoder, weight_bit_width)\n        return self\n\n\nclass ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):\n    def __init__(self, config: ChatGLMConfig, empty_init=True, device=None):\n        super().__init__(config)\n\n        self.max_sequence_length = config.max_length\n        self.transformer = ChatGLMModel(config, empty_init=empty_init, device=device)\n        self.config = config\n        self.quantized = False\n\n        if self.config.quantization_bit:\n            self.quantize(self.config.quantization_bit, empty_init=True)\n\n    def _update_model_kwargs_for_generation(\n            self,\n            outputs: ModelOutput,\n            model_kwargs: Dict[str, Any],\n            is_encoder_decoder: bool = False,\n            standardize_cache_format: bool = False,\n    
) -> Dict[str, Any]:\n        # update past_key_values\n        model_kwargs[\"past_key_values\"] = self._extract_past_from_model_output(\n            outputs, standardize_cache_format=standardize_cache_format\n        )\n\n        # update attention mask\n        if \"attention_mask\" in model_kwargs:\n            attention_mask = model_kwargs[\"attention_mask\"]\n            model_kwargs[\"attention_mask\"] = torch.cat(\n                [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1\n            )\n\n        # update position ids\n        if \"position_ids\" in model_kwargs:\n            position_ids = model_kwargs[\"position_ids\"]\n            new_position_id = position_ids[..., -1:].clone()\n            new_position_id += 1\n            model_kwargs[\"position_ids\"] = torch.cat(\n                [position_ids, new_position_id], dim=-1\n            )\n\n        model_kwargs[\"is_first_forward\"] = False\n        return model_kwargs\n\n    def prepare_inputs_for_generation(\n            self,\n            input_ids: torch.LongTensor,\n            past_key_values: Optional[torch.Tensor] = None,\n            attention_mask: Optional[torch.Tensor] = None,\n            position_ids: Optional[torch.Tensor] = None,\n            use_cache: Optional[bool] = None,\n            is_first_forward: bool = True,\n            **kwargs\n    ) -> dict:\n        # only last token for input_ids if past is not None\n        if position_ids is None:\n            position_ids = self.get_position_ids(input_ids, device=input_ids.device)\n        if not is_first_forward:\n            if past_key_values is not None:\n                position_ids = position_ids[..., -1:]\n                input_ids = input_ids[:, -1:]\n        return {\n            \"input_ids\": input_ids,\n            \"past_key_values\": past_key_values,\n            \"position_ids\": position_ids,\n            \"attention_mask\": attention_mask,\n            \"return_last_logit\": True,\n            \"use_cache\": use_cache\n        }\n\n    def forward(\n            self,\n            input_ids: Optional[torch.Tensor] = None,\n            position_ids: Optional[torch.Tensor] = None,\n            attention_mask: Optional[torch.Tensor] = None,\n            past_key_values: Optional[Tuple[torch.FloatTensor]] = None,\n            inputs_embeds: Optional[torch.Tensor] = None,\n            labels: Optional[torch.Tensor] = None,\n            use_cache: Optional[bool] = None,\n            output_attentions: Optional[bool] = None,\n            output_hidden_states: Optional[bool] = None,\n            return_dict: Optional[bool] = None,\n            return_last_logit: Optional[bool] = False,\n    ):\n        use_cache = use_cache if use_cache is not None else self.config.use_cache\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n        transformer_outputs = self.transformer(\n            input_ids=input_ids,\n            position_ids=position_ids,\n            attention_mask=attention_mask,\n            past_key_values=past_key_values,\n            inputs_embeds=inputs_embeds,\n            use_cache=use_cache,\n            output_hidden_states=output_hidden_states,\n            return_dict=return_dict,\n        )\n\n        hidden_states = transformer_outputs[0]\n        if return_last_logit:\n            hidden_states = hidden_states[-1:]\n        lm_logits = self.transformer.output_layer(hidden_states)\n        lm_logits = lm_logits.transpose(0, 1).contiguous()\n\n    
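    # Standard causal-LM loss below: shift logits/labels so that position t predicts token t + 1.\n    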
    loss = None\n        if labels is not None:\n            lm_logits = lm_logits.to(torch.float32)\n\n            # Shift so that tokens < n predict n\n            shift_logits = lm_logits[..., :-1, :].contiguous()\n            shift_labels = labels[..., 1:].contiguous()\n            # Flatten the tokens\n            loss_fct = CrossEntropyLoss(ignore_index=-100)\n            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))\n\n            lm_logits = lm_logits.to(hidden_states.dtype)\n            loss = loss.to(hidden_states.dtype)\n\n        if not return_dict:\n            output = (lm_logits,) + transformer_outputs[1:]\n            return ((loss,) + output) if loss is not None else output\n\n        return CausalLMOutputWithPast(\n            loss=loss,\n            logits=lm_logits,\n            past_key_values=transformer_outputs.past_key_values,\n            hidden_states=transformer_outputs.hidden_states,\n            attentions=transformer_outputs.attentions,\n        )\n\n    @staticmethod\n    def _reorder_cache(\n            past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor\n    ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:\n        \"\"\"\n        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or\n        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct\n        beam_idx at every generation step.\n\n        Output shares the same memory storage as `past`.\n        \"\"\"\n        return tuple(\n            (\n                layer_past[0].index_select(1, beam_idx.to(layer_past[0].device)),\n                layer_past[1].index_select(1, beam_idx.to(layer_past[1].device)),\n            )\n            for layer_past in past\n        )\n\n    def process_response(self, output, history):\n        content = \"\"\n        history = deepcopy(history)\n        for response in output.split(\"<|assistant|>\"):\n            if \"\\n\" in response:\n                metadata, content = response.split(\"\\n\", maxsplit=1)\n            else:\n                metadata, content = \"\", response\n            if not metadata.strip():\n                content = content.strip()\n                history.append({\"role\": \"assistant\", \"metadata\": metadata, \"content\": content})\n                content = content.replace(\"[[训练时间]]\", \"2023年\")\n            else:\n                history.append({\"role\": \"assistant\", \"metadata\": metadata, \"content\": content})\n                if history[0][\"role\"] == \"system\" and \"tools\" in history[0]:\n                    content = \"\\n\".join(content.split(\"\\n\")[1:-1])\n                    def tool_call(**kwargs):\n                        return kwargs\n                    parameters = eval(content)\n                    content = {\"name\": metadata.strip(), \"parameters\": parameters}\n                else:\n                    content = {\"name\": metadata.strip(), \"content\": content}\n        return content, history\n\n    @torch.inference_mode()\n    def chat(self, tokenizer, query: str, history: List[Dict] = None, role: str = \"user\",\n             max_length: int = 8192, num_beams=1, do_sample=True, top_p=0.8, temperature=0.8, logits_processor=None,\n             **kwargs):\n        if history is None:\n            history = []\n        if logits_processor is None:\n            logits_processor = LogitsProcessorList()\n        
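# InvalidScoreLogitsProcessor (defined earlier in this file) resets the distribution if the model ever emits NaN/Inf scores.\n        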
logits_processor.append(InvalidScoreLogitsProcessor())\n        gen_kwargs = {\"max_length\": max_length, \"num_beams\": num_beams, \"do_sample\": do_sample, \"top_p\": top_p,\n                      \"temperature\": temperature, \"logits_processor\": logits_processor, **kwargs}\n        inputs = tokenizer.build_chat_input(query, history=history, role=role)\n        inputs = inputs.to(self.device)\n        eos_token_id = [tokenizer.eos_token_id, tokenizer.get_command(\"<|user|>\"),\n                        tokenizer.get_command(\"<|observation|>\")]\n        outputs = self.generate(**inputs, **gen_kwargs, eos_token_id=eos_token_id)\n        outputs = outputs.tolist()[0][len(inputs[\"input_ids\"][0]):-1]\n        response = tokenizer.decode(outputs)\n        history.append({\"role\": role, \"content\": query})\n        response, history = self.process_response(response, history)\n        return response, history\n\n    @torch.inference_mode()\n    def stream_chat(self, tokenizer, query: str, history: List[Dict] = None, role: str = \"user\",\n                    past_key_values=None, max_length: int = 8192, do_sample=True, top_p=0.8, temperature=0.8,\n                    logits_processor=None, return_past_key_values=False, **kwargs):\n        if history is None:\n            history = []\n        if logits_processor is None:\n            logits_processor = LogitsProcessorList()\n        logits_processor.append(InvalidScoreLogitsProcessor())\n        eos_token_id = [tokenizer.eos_token_id, tokenizer.get_command(\"<|user|>\"),\n                        tokenizer.get_command(\"<|observation|>\")]\n        gen_kwargs = {\"max_length\": max_length, \"do_sample\": do_sample, \"top_p\": top_p,\n                      \"temperature\": temperature, \"logits_processor\": logits_processor, **kwargs}\n        if past_key_values is None:\n            inputs = tokenizer.build_chat_input(query, history=history, role=role)\n        else:\n            inputs = tokenizer.build_chat_input(query, role=role)\n        inputs = inputs.to(self.device)\n        if past_key_values is not None:\n            past_length = past_key_values[0][0].shape[0]\n            if self.transformer.pre_seq_len is not None:\n                past_length -= self.transformer.pre_seq_len\n            inputs.position_ids += past_length\n            attention_mask = inputs.attention_mask\n            attention_mask = torch.cat((attention_mask.new_ones(1, past_length), attention_mask), dim=1)\n            inputs[\"attention_mask\"] = attention_mask\n        history.append({\"role\": role, \"content\": query})\n        for outputs in self.stream_generate(**inputs, past_key_values=past_key_values,\n                                            eos_token_id=eos_token_id, return_past_key_values=return_past_key_values,\n                                            **gen_kwargs):\n            if return_past_key_values:\n                outputs, past_key_values = outputs\n            outputs = outputs.tolist()[0][len(inputs[\"input_ids\"][0]):-1]\n            response = tokenizer.decode(outputs)\n            if response and response[-1] != \"�\":\n                response, new_history = self.process_response(response, history)\n                if return_past_key_values:\n                    yield response, new_history, past_key_values\n                else:\n                    yield response, new_history\n\n    @torch.inference_mode()\n    def stream_generate(\n            self,\n            input_ids,\n            generation_config: 
Optional[GenerationConfig] = None,\n            logits_processor: Optional[LogitsProcessorList] = None,\n            stopping_criteria: Optional[StoppingCriteriaList] = None,\n            prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,\n            return_past_key_values=False,\n            **kwargs,\n    ):\n        batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]\n\n        if generation_config is None:\n            generation_config = self.generation_config\n        generation_config = copy.deepcopy(generation_config)\n        model_kwargs = generation_config.update(**kwargs)\n        model_kwargs[\"use_cache\"] = generation_config.use_cache\n        bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id\n\n        if isinstance(eos_token_id, int):\n            eos_token_id = [eos_token_id]\n        eos_token_id_tensor = torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None\n\n        has_default_max_length = kwargs.get(\"max_length\") is None and generation_config.max_length is not None\n        if has_default_max_length and generation_config.max_new_tokens is None:\n            warnings.warn(\n                f\"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. \"\n                \"This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we\"\n                \" recommend using `max_new_tokens` to control the maximum length of the generation.\",\n                UserWarning,\n            )\n        elif generation_config.max_new_tokens is not None:\n            generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length\n            if not has_default_max_length:\n                logger.warning(\n                    f\"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length` (=\"\n                    f\"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. \"\n                    \"Please refer to the documentation for more information. \"\n                    \"(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)\"\n                )\n\n        if input_ids_seq_length >= generation_config.max_length:\n            input_ids_string = \"decoder_input_ids\" if self.config.is_encoder_decoder else \"input_ids\"\n            logger.warning(\n                f\"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to\"\n                f\" {generation_config.max_length}. This can lead to unexpected behavior. You should consider\"\n                \" increasing `max_new_tokens`.\"\n            )\n\n        # 2. 
Set generation parameters if not already defined\n        logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()\n        stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()\n\n        logits_processor = self._get_logits_processor(\n            generation_config=generation_config,\n            input_ids_seq_length=input_ids_seq_length,\n            encoder_input_ids=input_ids,\n            prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,\n            logits_processor=logits_processor,\n        )\n\n        stopping_criteria = self._get_stopping_criteria(\n            generation_config=generation_config, stopping_criteria=stopping_criteria\n        )\n        logits_warper = self._get_logits_warper(generation_config)\n\n        unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)\n        scores = None\n        while True:\n            model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)\n            # forward pass to get next token\n            outputs = self(\n                **model_inputs,\n                return_dict=True,\n                output_attentions=False,\n                output_hidden_states=False,\n            )\n\n            next_token_logits = outputs.logits[:, -1, :]\n\n            # pre-process distribution\n            next_token_scores = logits_processor(input_ids, next_token_logits)\n            next_token_scores = logits_warper(input_ids, next_token_scores)\n\n            # sample\n            probs = nn.functional.softmax(next_token_scores, dim=-1)\n            if generation_config.do_sample:\n                next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)\n            else:\n                next_tokens = torch.argmax(probs, dim=-1)\n            # update generated ids, model inputs, and length for next step\n            input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)\n            model_kwargs = self._update_model_kwargs_for_generation(\n                outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder\n            )\n            unfinished_sequences = unfinished_sequences.mul(\n                next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0)\n            )\n            if return_past_key_values:\n                yield input_ids, outputs.past_key_values\n            else:\n                yield input_ids\n            # stop when each sentence is finished, or if we exceed the maximum length\n            if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):\n                break\n\n    def quantize(self, bits: int, empty_init=False, device=None, **kwargs):\n        if bits == 0:\n            return\n\n        from .quantization import quantize\n\n        if self.quantized:\n            logger.info(\"Already quantized.\")\n            return self\n\n        self.quantized = True\n\n        self.config.quantization_bit = bits\n\n        self.transformer.encoder = quantize(self.transformer.encoder, bits, empty_init=empty_init, device=device,\n                                            **kwargs)\n        return self\n\n\nclass ChatGLMForSequenceClassification(ChatGLMPreTrainedModel):\n    def __init__(self, config: ChatGLMConfig, empty_init=True, device=None):\n        super().__init__(config)\n\n        self.num_labels = config.num_labels\n        self.transformer = ChatGLMModel(config, empty_init=empty_init, 
device=device)\n\n        self.classifier_head = nn.Linear(config.hidden_size, config.num_labels, bias=True, dtype=torch.half)\n        if config.classifier_dropout is not None:\n            self.dropout = nn.Dropout(config.classifier_dropout)\n        else:\n            self.dropout = None\n        self.config = config\n\n        if self.config.quantization_bit:\n            self.quantize(self.config.quantization_bit, empty_init=True)\n\n    def forward(\n            self,\n            input_ids: Optional[torch.LongTensor] = None,\n            position_ids: Optional[torch.LongTensor] = None,\n            attention_mask: Optional[torch.Tensor] = None,\n            full_attention_mask: Optional[torch.Tensor] = None,\n            past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,\n            inputs_embeds: Optional[torch.LongTensor] = None,\n            labels: Optional[torch.LongTensor] = None,\n            use_cache: Optional[bool] = None,\n            output_hidden_states: Optional[bool] = None,\n            return_dict: Optional[bool] = None,\n    ) -> Union[Tuple[torch.Tensor, ...], SequenceClassifierOutputWithPast]:\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n        transformer_outputs = self.transformer(\n            input_ids=input_ids,\n            position_ids=position_ids,\n            attention_mask=attention_mask,\n            full_attention_mask=full_attention_mask,\n            past_key_values=past_key_values,\n            inputs_embeds=inputs_embeds,\n            use_cache=use_cache,\n            output_hidden_states=output_hidden_states,\n            return_dict=return_dict,\n        )\n\n        hidden_states = transformer_outputs[0]\n        pooled_hidden_states = hidden_states[-1]\n        if self.dropout is not None:\n            pooled_hidden_states = self.dropout(pooled_hidden_states)\n        logits = self.classifier_head(pooled_hidden_states)\n\n        loss = None\n        if labels is not None:\n            if self.config.problem_type is None:\n                if self.num_labels == 1:\n                    self.config.problem_type = \"regression\"\n                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n                    self.config.problem_type = \"single_label_classification\"\n                else:\n                    self.config.problem_type = \"multi_label_classification\"\n\n            if self.config.problem_type == \"regression\":\n                loss_fct = MSELoss()\n                if self.num_labels == 1:\n                    loss = loss_fct(logits.squeeze().float(), labels.squeeze())\n                else:\n                    loss = loss_fct(logits.float(), labels)\n            elif self.config.problem_type == \"single_label_classification\":\n                loss_fct = CrossEntropyLoss()\n                loss = loss_fct(logits.view(-1, self.num_labels).float(), labels.view(-1))\n            elif self.config.problem_type == \"multi_label_classification\":\n                loss_fct = BCEWithLogitsLoss()\n                loss = loss_fct(logits.float(), labels.view(-1, self.num_labels))\n\n        if not return_dict:\n            output = (logits,) + transformer_outputs[1:]\n            return ((loss,) + output) if loss is not None else output\n\n        return SequenceClassifierOutputWithPast(\n            loss=loss,\n            logits=logits,\n            past_key_values=transformer_outputs.past_key_values,\n         
   hidden_states=transformer_outputs.hidden_states,\n            attentions=transformer_outputs.attentions,\n        )\n"
  },
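  {
    "path": "examples/chat_demo.py",
    "content": "\"\"\"Minimal usage sketch for the chat() helper defined in chatglm3/modeling_chatglm.py.\n\nAssumptions (not part of the repo itself): the THUDM/chatglm3-6b checkpoint is reachable\n(from the Hub or a local path) and a CUDA device is available for half precision.\n\"\"\"\nfrom transformers import AutoModel, AutoTokenizer\n\nMODEL_NAME = 'THUDM/chatglm3-6b'  # assumed checkpoint; swap in a local path if needed\n\ntokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)\nmodel = AutoModel.from_pretrained(MODEL_NAME, trust_remote_code=True).half().cuda().eval()\n\n# chat() builds the prompt via the tokenizer, generates, and post-processes\n# the raw output with process_response().\nresponse, history = model.chat(tokenizer, 'Hello', history=[])\nprint(response)\n\n# Follow-up turns reuse the returned history.\nresponse, history = model.chat(tokenizer, 'Summarize that in one sentence.', history=history)\nprint(response)\n"
  },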
  {
    "path": "chatglm3/quantization.py",
    "content": "from torch.nn import Linear\nfrom torch.nn.parameter import Parameter\n\nimport bz2\nimport torch\nimport base64\nimport ctypes\nfrom transformers.utils import logging\n\nfrom typing import List\nfrom functools import partial\n\nlogger = logging.get_logger(__name__)\n\ntry:\n    from cpm_kernels.kernels.base import LazyKernelCModule, KernelFunction, round_up\n\n    class Kernel:\n        def __init__(self, code: bytes, function_names: List[str]):\n            self.code = code\n            self._function_names = function_names\n            self._cmodule = LazyKernelCModule(self.code)\n\n            for name in self._function_names:\n                setattr(self, name, KernelFunction(self._cmodule, name))\n\n    quantization_code = \"$QlpoOTFBWSZTWU9yuJUAQHN//////////f/n/8/n///n//bt4dTidcVx8X3V9FV/92/v4B7/AD5FBQFAAAChSgKpFCFAFVSigUAAAEKhSgUUqgFBKigqVREQAABQBQIANDTTIGI00BkZBkNGE0A0BkBkGQGRkaNAaAGQNBoGgDIAAYIGTI0DQAQAaGmmQMRpoDIyDIaMJoBoDIDIMgMjI0aA0AMgaDQNAGQAAwQMmRoGgAgA0NNMgYjTQGRkGQ0YTQDQGQGQZAZGRo0BoAZA0GgaAMgABggZMjQNABABoaaZAxGmgMjIMhowmgGgMgMgyAyMjRoDQAyBoNA0AZAADBAyZGgaAAmqU1NEgJqnptU/Sn4jRR6J6epk2pqb1Q/SgAPUGgyNNGjQ2SBpoAZAAGg0NB6mgDIAAAAA2oaApSREBNAARhGiYEaEwU8pvImlP0k2aam1GaGqbFNM1MHpTwmkepmyU9R6nqPKekHqNNPUxNGhp6n6p6QaZ6o9TG1GMqcoV9ly6nRanHlq6zPNbnGZNi6HSug+2nPiZ13XcnFYZW+45W11CumhzYhchOJ2GLLV1OBjBjGf4TptOddTSOcVxhqYZMYwZXZZY00zI1paX5X9J+b+f4e+x43RXSxXPOdquiGpduatGyXneN696M9t4HU2eR5XX/kPhP261NTx3JO1Ow7LyuDmeo9a7d351T1ZxnvnrvYnrXv/hXxPCeuYx2XsNmO003eg9J3Z6U7b23meJ4ri01OdzTk9BNO96brz+qT5nuvvH3ds/G+m/JcG/F2XYuhXlvO+jP7U3XgrzPN/lr8Sf1n6j4j7jZs+s/T0tNaNNYzTs12rxjwztHlnire3Nzc3N1wuBwOBwXBvZfoHpD7rFmR99V5vj3aXza3xdBbXMalubTg/jIv5dfAi54Pdc75j4z412n3Npj3Ld/ENm7a3b/Cod6h/ret1/5vn/C+l+gdslMvgPSLJ8d8q+U66fevYn/tW1chleEtNTGlcHCbLRlq0tHzF5tsbbZZfHjjLgZu42XCuC3NrdjTasZGNzgxPIrGqp7r3p7L2p5XjnpPSmTd5XtzqnB6U87zzg1Ol0zd0zsLszxR6lkxp35u6/teL0L0W922cR7Lu1lpL9CsHirzuM2T+BgsyViT6LHcm0/Vr6U/7LGGyJeqTEjt0PHWhF5mCT7R9mtlDwriYv0Tyr/OxYt6qp5r0mPVT0608TqnqMZaarU2nFwrTzzlrs1ed7z1ux60wyr4ydCaTi3enW8x68x0zU7tXSlcmPSW1mGpWJMg4zmPC2lK96tp0OE80y4MfEvnZj8zGluR6b22ki1Ou9V2nCd9xovcPvcYMZYy0lvN60ScZ45vN6yeCeeXFb1lVjnnCar5fwXwE2bzJ4HI1XVPXfXZMm44GUsMpYsmLB65TuVdm0cl0b+i/wGNN66XjeV7zuPpHcnK/juhhjdfId5jMdE5nN0dGmmm2zZs2cexD5n9p/dY352XsvXHaZNWWsmmS1atjR452nYudzvqv2HMRyvNNnlMcDl3R2+yx2uVrBubTW9icHDVtbNXlZm7jma1rM4VurZZd2y6nUau7ZXZ7bVU+mnoOVxZGMrVmvX60605JwmzGZhhhjTWtaaaMaaGTGmNMZasY0iX8VMUl8eepaIrzGSpemWOQyZORk2bNpjUybMmxqYmknCGCFynutfksaZpjTNMaaatM0xsxcGR0sociNqxNSmhhR1ZJPbsn8qyF0t2qH6iYBclclalbtTTcHTDsPaX6rlnElph2Jyumumtynv2Kk8GI7rsvXbIcJgHJOSaSXnnGaI3m87RtVXJOZ/YtgdTE6Wpha6ZlE8ayXkef1fh602r2WwvfMXtMdLlkfnLFdYYwYso+bWqm7yJqHXZGw2nrS5ZanSYnWlxBxMF1V940K2wdrI7R6OYf7DGGamMmTSbRhlS45xmVOumF1EyPCmHrrN8wwZOOrdNtLeMtzFzDlWnfTBxMk2NaXIZHBYxYLD4w8yju0ao65Vz1OIXoS9dLanwCe1PWrYuWMqf1if1z2k2yYfKJ741PDgno1ZQ8DRqvUny3mNoWTzGO6m1DkrJI8JiR5cSd+vZdGOO8nrMoc5+NDUFsMSXaZJeNlMmGLtJsovOsUp7I9S5VojKxF6bTVEelXqlfJobQr3LozSh2Jk7VcrVMfhXqszGWMzNqGhqZY0OadxkyyMssKugZR0KNFXBHlqwmJgTE/BNVMk6ItJXZMR0H47GpXv/DMOvNkmVuaV1PRfEdxuqc7Hcd+ZV/zTLaRxWk0nl9CdCeM6mn5rstHIBcpiuwmUZXeq81DacHI2rmrZ5SuE5mOZd6LQrZg9mx32TprA8BMo5jKN6yLTCi3WzQaZSuhzTtM1fUTGVpG8Tw+KXI0tjEpiWxtLYynOlktSbVlaI5kxP8TDH8kx50xoxi5KcA4pcja8KWLRlO/Ks6q06ergnvm1ca3Tq8Uw7LTUsmWyctXPWmpitl/uvGcWTGXGuAXDfhqazGmjkxcJW5hMMMMpYsXl2TZYtVOddG3XCarUt6Ptq9CZXSNzyuRzqRZOjsxdBbFVz6OA5HI43r1jityVlVpVkxmOsyaYWE1NTGq1sOVh36mHMcxtSvcy70edG0ZGR3I1Go1GRlV7mWWo1G0ZGRqlvH40l7o4m5xMWLLLYyNjnqc8556mdPqLJ31n/1nWOncxzG1tizrHs/Z+d2vP/B/l8wdJ6rHUn2nbbDq4p6htFtYzMMMTaZis1K5GKzGNmxhmUx2DDlZ/qNnIx41xnaMfCZ
WYaZWtNLTNW8ND4Fw1MyZOCdM428suKG1ehW8TesOydg7J+YYcD4cYR+8dFK6M4E3HM9ZfRNNL+Sn6rsl4DsrDl2HpPCnfxjGXtbZtYys1ttlyJ4T+BvexjGWRjMszK4Jpc77D3GyuVD7q0+G8m9G+2+rGm7cOR2y7FdtY2XUYx/oNlfRYxhMYyYZkyyg55enna9Kt/FFi6GMMwYwdwxWgxGMLKYmUyGExTKMZkMFhkymKuh0NOBNnBu+23LdwDoZYYzGGMxtORaTU1pjTGWTTGGtMrNWUsyyTTLLG1qy2ZjbK2DBllWqxMtBMaYZQmcE7zvvRcTkclUwdkxTaSdyySt/7fpL+T1v516Ji97fwr5JbLu305zMn5+GMTTZ9F+y7ExwmGVfG44yxn3dLv6l5i+Wth1jCrDq21nW9LqvvDzz3Vf3LLH/O/32TJ/erx3bXftO4eF+G956D952K/An4NfvOpjFjExjevP/UmE0fIoZXx6/w6lX/no3D0bLt+ixjieBM6ksRd0yB4Lt2SwYNE+gd1detlZWUnpiZfGfFaK+4PyCa/v18V8X75pe9fLXzp7l3VjF76vWZmHwGz1IZNWT7b8yddJ4q5kyrVdfru6atWc7bVYztL9Jf4GXvT+Y8m9/YsXP6H018a8D4XVOqvfzqeR+6yZOD8dPv0+U7/q5Pl+2dNb0MjzGVH5p6MNQ7cOWvw62U9aHE8DprDek+McLyvDz+te+9Zhq5+YTruufMcWMabqysTmZVWjKPfnK0wyVcrsuhjZRdLkHNvD72b9abriOSGIxiLixMOoalNPXzy+wT/tf+U6HHONfsz+xe8ufHBdQWWGWLA9if0rsnmrxK5LvRZQeWsTCsrmOYy8VteVfuRfcVTtDLItLIsMYxZLdU/DbtSemxF6Z6Zo5WBXE4tFdCyVMMXMTEMZXVlS6Xec2T4e0tHsRcEuWshcJ2YsNF5rUx1E8ifCq6Z+ZP7qdCeu/aTwFd53l16/o0NOw6O3dLavP4Hbi4RdmuDk6DoYaninC0+o4uZjbJ7Rxeu0/FbuFg+q7DVS6fQe0rZ6NDGUNNU6DEqOaLTicKnYZMnBWruljQxoaS3dZhocDge0bSTyOvdAbG5hxe2xji7E/L55xX13wWNDi6HCekcFxfCPGxY0MXC+s7afWaMdDyjyr+o8Rudm/NabOZvdl274zH4f5XK9z6On1Pe/K5TdPAslg77BjuO6Y3eO7GqvOPG/stknp1leyvLL0Z7bl9I4noMvLkzytLhWYzrOZzLXCORe028rORzOg4N/L0HlMOQ3Pgmnbb6KczlabORpu980q37TBqRu0/p3PO6234Bl03Ynuz+9W7gnsEcmvYaYY3aMYY0wx3pYd+ujsXauWdaY5Xkbtl23fPzFHiDB/QMo0yFjBllYxTQYYyxkrwn7JufwJ/PfgJ+C83X69ni6zvXcnyXabv0ncbLwsceS+RNlyN2mnneJtX0ngYO0+e+0+UnA+Wch3ji8hj5an4h+i6XBySU4n+R0roVcbw5yvHrmr4Yw8Y7x6c+9POPYHI5HI5HI5HI5HGXGww4nE4nrVyOR8XeqPEO7PLOiukYa3Novk5hV4cdtYZLI93e+uxff2jRo0aNGjRo0aNG1bVtW1dy3m83m8+tQ5ZzHw3nObwOu8La9Rc1dtkdS8A3eTk823tnktXWlxN6Oixe06zrN70Isd9jiOgZFq9yfkPqP/SLhN2Myl8jDM43bl1nbcb4cO57jlh8Jow6pzXZdL4dyODTuuhu77FyO27DdwdRxmvO+O+3N2+BdqyTwLHVczDVY4UPE4O66/ZO2cx1LFzVdSXtF7G4HMbrauOHRw6c8FdZ5m9fHZHYZXfTlZquyynSyTTKke6vcffSD9pzPA/G7n7jxPmuhc1DHMynPMrGL6AdewYmwu5ko+UUyTwrMv27rPH1v1nGqd87+p6N6LU8k3NEng53xXyHS97+44OSg/sy/hn+Se6yfYNjW0/uTgP+PvWYzLMmjhcLB/gGpri6H83/84eUXWT6T9Hsv7785z/7z4icpW+zfXypuR7rx/gMdZb1/wC678pcs8/2a3mDitGHxl9mfPlll5MafWWqxk/eYuTDgcNMzDGWLWvsuglNxs53GtN6uWpktlW1tZZYcuinMMWmnNnJydze3b2Y1McBxrBkXw799izLMZZYyy0TkbsGM4p03S2uVu5s/XXUdSdec6smVxZYYGpVmT8A+8ajuEyV5FatkvVru2x6uxGXXbH4A+jvgP4GMYy3iPLXzq/6z65+E005ey+cwMZD3fZcqc6xpjTFjQ0P3U+e++cPYmTIwj0nrK5NPTfl3WvpfLtXDcb2HQMudYOxFXQBor4L4T6vrOauFctYXJQ++NUWmJe5bmx1jDiZS1dTqWxo4GR8jm3fttpmPHppk9PEyv4/y8/sO07XacOmcqc0x2Vi9BvNJvN5oW8x4mOsydpidRxMYJPx06m1bqPzq9KtK8sxXNXFodD/+MYYaJTLwOhc9brCsV18oOR1i4tXChyTkq4lf4y1Ke+9axjDHqs1mfBbMXuP4Hzi+X7t8vzv7bHerrUPgPCxhjre4fXdfLNtNM+Jd+Zdh8xd8wP87uNPoPgv4W7/5P2BuxfsMabNnMnza+54Pdi5U671GPZY8CehX8Voeoo7FHpkeEc6715FwHZrIrUrHaviPUbPZHND+IhczrP6FcYvhOZ0Di/ETt0OI+YwNWR9r7tpf6WDeZKZDB1+z2IthOl1mPyb5FluvEx9h9d0NnM0Y1XPFkWIsk1WotJ0PBMmkvjvQTd0e71tfeV+8r8lQ/tpzpsmxJ+InrI/dj2UajUajVTUajatRqNRtGo1Go1Go4wjeMpZFMVV9CHbofPraLsJ3JpWV2XOoanCuFky4y3PPNxucK2uKC1Lbdb1eo+m5XomN6HfeZsabHLHRX/K+offtNGGmHWctcVcG44MdSqsOLY9VzX+Zxfxn2HPdWTpzWvkrtJ8M5zorrKcquRytJ5N5DZmcaW02l76nWO+BqPXm1A2Ry/0q71dH/mqrqeFjkYxjEXtsX8qubTk67rGycyqsdm4tZx5D6D5hhi0waaWmiaMP81Yjii5qxPlPuU/GfTL1Y5E6Jyfiq63qTa39A4J0sOGDgO9WF9bOXl0XfPRbsY2bPNKPy1YrFYrFYmRhhlTIyMjJWJYZHXuCXI8OoXsvfljGLFicNifpp2XunoPiG1wtx3p1Tah+/DD66OnVtVXP9rKbVxOnL0tR/rHtqB5UDErUVcl11D4qqvjpOcxX7armUNJB3LpW6bxVvD08e8h3odKKvyCFZBdSh2FVcST9xV3n3T8t1j7Kr9qgrqXg+13Pt5U7JCvFXVIV1YG5lRhkVYZJYYDDD4KOIMoHCp26WS8GB7uBh2zIdgq/PKyInjV2STShuoapUdCpX1yTwqq/z1VvET7Kh5nVPkO8YyxjLt2MaaMmWTLQvx3qnzltnXW0p2jxgbEtSny/Osv8Y9pLMXYoHVPAhkVdWVeODhR6q9/Sxe2liwwZWMVvFXfRkeIDxAe
PUPIrdJ4ey6yquzH+PD/bUOWAu05qVHtFd8rrKHSoeNIOUqrYr3FXyToqfYJgwmJdKpXXOwYYegNNGMzfZPp/t3t/DVs4zjNTN61rRqaWaa4NYbRjTa0tWwy2Y2tGN8ZO8ofNKq4j9SL7I+cSm4/6ovLV5HNXLI0jJidwrtk6ynCaP6Z++GjRlWS3tLeW129Mi9evxU9mtz6s5J3Z7M2ngTgnKvmpomxpaLCzPfmx0JWE+m3NLDDGOX47RctdYYNK5jakdqLkRlI39n590T5zctGSwwZZDJj6kW8XSi6ot2MmWWJ0DUT3nuvebBudScjZ79g8cWJ8av0k+/bE5WKd5MdbFpbDVMxu1DVMmtNZGJvq1mtRbn6M+g/kP0FwDwr7quZs7xosNGpbscyxhhd9TyJyFwbLcxlTasg75vW7TsV5K7ji44XPMMrdoj+Y3rT0Hie62nlYV/pwczzOmdLqLhYkzGMzCZWGMQzGMSsZYY6Di1t4nlJ+Em63mJxrVLxPbYxNEdgc1dU2iOKyoYYWjNrEeHTYybVk0atSa7ehuwsWMWTqn1TrnS6hYsi71d1+s+k+ic70e20fzE/VaTdxT9ZtU4GIXdeNx3X77guYYfpHeTQjaMX6brOu4OY4K7Y2d9mbHarI5ox3p4GpJ2Vd/Tst60f7j999pppjR+Q/Qf8J/VaORs3cji7FfFuN61+ui9s8hix1OCh5KGVV23BPXvZfz3CLyHpix+exi8z/KnCnosY2eunor+cxyPO/xJ0vKey9OvE9VjqaYu0x3Z3jd6o2b1T12D+F8l232lwaaacD5LE8LBxu7WTlbWraWpew8Xexjel3E+wWD4APITdNqR8F3R3T0lunCQ4GaE9R37DxeCYfcHi4xci5ovKfxVs55y2hf+65E/Xdp6jR5nrebTmi5incpkyOjs50JvrZwstbbW6kfuuQw+2mykf/EXNFzxfKTrxew929TR6bWnGL//F3JFOFCQT3K4lQ\"\n\n    kernels = Kernel(\n        bz2.decompress(base64.b64decode(quantization_code)),\n        [\n            \"int4WeightCompression\",\n            \"int4WeightExtractionFloat\",\n            \"int4WeightExtractionHalf\",\n            \"int8WeightExtractionFloat\",\n            \"int8WeightExtractionHalf\",\n        ],\n    )\nexcept Exception as exception:\n    kernels = None\n    logger.warning(\"Failed to load cpm_kernels:\" + str(exception))\n\n\nclass W8A16Linear(torch.autograd.Function):\n    @staticmethod\n    def forward(ctx, inp: torch.Tensor, quant_w: torch.Tensor, scale_w: torch.Tensor, weight_bit_width):\n        ctx.inp_shape = inp.size()\n        ctx.weight_bit_width = weight_bit_width\n        out_features = quant_w.size(0)\n        inp = inp.contiguous().view(-1, inp.size(-1))\n        weight = extract_weight_to_half(quant_w, scale_w, weight_bit_width)\n        ctx.weight_shape = weight.size()\n        output = inp.mm(weight.t())\n        ctx.save_for_backward(inp, quant_w, scale_w)\n        return output.view(*(ctx.inp_shape[:-1] + (out_features,)))\n\n    @staticmethod\n    def backward(ctx, grad_output: torch.Tensor):\n        inp, quant_w, scale_w = ctx.saved_tensors\n        weight = extract_weight_to_half(quant_w, scale_w, ctx.weight_bit_width)\n        grad_output = grad_output.contiguous().view(-1, weight.size(0))\n        grad_input = grad_output.mm(weight)\n        grad_weight = grad_output.t().mm(inp)\n        return grad_input.view(ctx.inp_shape), grad_weight.view(ctx.weight_shape), None, None\n\n\ndef compress_int4_weight(weight: torch.Tensor):  # (n, m)\n    with torch.cuda.device(weight.device):\n        n, m = weight.size(0), weight.size(1)\n        assert m % 2 == 0\n        m = m // 2\n        out = torch.empty(n, m, dtype=torch.int8, device=\"cuda\")\n        stream = torch.cuda.current_stream()\n\n        gridDim = (n, 1, 1)\n        blockDim = (min(round_up(m, 32), 1024), 1, 1)\n\n        kernels.int4WeightCompression(\n            gridDim,\n            blockDim,\n            0,\n            stream,\n            [ctypes.c_void_p(weight.data_ptr()), ctypes.c_void_p(out.data_ptr()), ctypes.c_int32(n), ctypes.c_int32(m)],\n        )\n        return out\n\n\ndef extract_weight_to_half(weight: torch.Tensor, scale_list: torch.Tensor, source_bit_width: int):\n    assert scale_list.dtype in [torch.half, torch.bfloat16]\n    assert weight.dtype in [torch.int8]\n    if source_bit_width == 8:\n        return weight.to(scale_list.dtype) * scale_list[:, None]\n    elif 
source_bit_width == 4:\n        # Only a float16 int4 extraction kernel is loaded in the kernel list above,\n        # so bfloat16 scales are not supported for int4 weights.\n        assert scale_list.dtype == torch.half, \"int4 weight extraction only supports float16 scales\"\n        func = kernels.int4WeightExtractionHalf\n    else:\n        assert False, \"Unsupported bit-width\"\n\n    with torch.cuda.device(weight.device):\n        n, m = weight.size(0), weight.size(1)\n        out = torch.empty(n, m * (8 // source_bit_width), dtype=scale_list.dtype, device=\"cuda\")\n        stream = torch.cuda.current_stream()\n\n        gridDim = (n, 1, 1)\n        blockDim = (min(round_up(m, 32), 1024), 1, 1)\n\n        func(\n            gridDim,\n            blockDim,\n            0,\n            stream,\n            [\n                ctypes.c_void_p(weight.data_ptr()),\n                ctypes.c_void_p(scale_list.data_ptr()),\n                ctypes.c_void_p(out.data_ptr()),\n                ctypes.c_int32(n),\n                ctypes.c_int32(m),\n            ],\n        )\n        return out\n\n\nclass QuantizedLinear(torch.nn.Module):\n    def __init__(self, weight_bit_width: int, weight, bias=None, device=\"cpu\", dtype=None, empty_init=False, *args,\n                 **kwargs):\n        super().__init__()\n        self.weight_bit_width = weight_bit_width\n\n        # A weight tensor is always required, if only to determine the quantized shape.\n        shape = weight.shape\n\n        if empty_init:\n            self.weight = torch.empty(shape[0], shape[1] * weight_bit_width // 8, dtype=torch.int8, device=device)\n            self.weight_scale = torch.empty(shape[0], dtype=dtype, device=device)\n        else:\n            self.weight_scale = weight.abs().max(dim=-1).values / ((2 ** (weight_bit_width - 1)) - 1)\n            self.weight = torch.round(weight / self.weight_scale[:, None]).to(torch.int8)\n            if weight_bit_width == 4:\n                self.weight = compress_int4_weight(self.weight)\n\n        self.weight = Parameter(self.weight.to(device), requires_grad=False)\n        self.weight_scale = Parameter(self.weight_scale.to(device), requires_grad=False)\n        self.bias = Parameter(bias.to(device), requires_grad=False) if bias is not None else None\n\n    def forward(self, input):\n        output = W8A16Linear.apply(input, self.weight, self.weight_scale, self.weight_bit_width)\n        if self.bias is not None:\n            output = output + self.bias\n        return output\n\n\ndef quantize(model, weight_bit_width, empty_init=False, device=None):\n    \"\"\"Replace fp16 linear layers with quantized linear layers.\"\"\"\n    for layer in model.layers:\n        layer.self_attention.query_key_value = QuantizedLinear(\n            weight_bit_width=weight_bit_width,\n            weight=layer.self_attention.query_key_value.weight.to(torch.cuda.current_device()),\n            bias=layer.self_attention.query_key_value.bias,\n            dtype=layer.self_attention.query_key_value.weight.dtype,\n            device=layer.self_attention.query_key_value.weight.device if device is None else device,\n            empty_init=empty_init\n        )\n        layer.self_attention.dense = QuantizedLinear(\n            weight_bit_width=weight_bit_width,\n            weight=layer.self_attention.dense.weight.to(torch.cuda.current_device()),\n            bias=layer.self_attention.dense.bias,\n            dtype=layer.self_attention.dense.weight.dtype,\n            device=layer.self_attention.dense.weight.device if device is None else device,\n            empty_init=empty_init\n        )\n        layer.mlp.dense_h_to_4h = QuantizedLinear(\n            weight_bit_width=weight_bit_width,\n            
weight=layer.mlp.dense_h_to_4h.weight.to(torch.cuda.current_device()),\n            bias=layer.mlp.dense_h_to_4h.bias,\n            dtype=layer.mlp.dense_h_to_4h.weight.dtype,\n            device=layer.mlp.dense_h_to_4h.weight.device if device is None else device,\n            empty_init=empty_init\n        )\n        layer.mlp.dense_4h_to_h = QuantizedLinear(\n            weight_bit_width=weight_bit_width,\n            weight=layer.mlp.dense_4h_to_h.weight.to(torch.cuda.current_device()),\n            bias=layer.mlp.dense_4h_to_h.bias,\n            dtype=layer.mlp.dense_4h_to_h.weight.dtype,\n            device=layer.mlp.dense_4h_to_h.weight.device if device is None else device,\n            empty_init=empty_init\n        )\n\n    return model\n"
  },
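  {
    "path": "examples/quantization_math_demo.py",
    "content": "\"\"\"CPU-only sketch of the per-row absmax quantization performed by QuantizedLinear in\nchatglm3/quantization.py: scale = row absmax / (2 ** (bits - 1) - 1), weight = round(w / scale).\nOnly the int8 math is reproduced here; the int4 packing and the CUDA extraction kernels are not.\nIllustrative example, not part of the model code.\n\"\"\"\nimport torch\n\nweight = torch.randn(8, 16)  # toy (out_features, in_features) matrix\n\n# Same formulas as QuantizedLinear.__init__ for weight_bit_width=8.\nweight_scale = weight.abs().max(dim=-1).values / ((2 ** (8 - 1)) - 1)\nweight_q = torch.round(weight / weight_scale[:, None]).to(torch.int8)\n\n# extract_weight_to_half() dequantizes int8 rows by multiplying the scale back in.\nweight_dq = weight_q.to(weight_scale.dtype) * weight_scale[:, None]\n\nprint('max abs round-trip error:', (weight - weight_dq).abs().max().item())\n"
  },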
  {
    "path": "chatglm3/tokenization_chatglm.py",
    "content": "import json\nimport os\nimport re\nfrom typing import List, Optional, Union, Dict\nfrom sentencepiece import SentencePieceProcessor\nfrom transformers import PreTrainedTokenizer\nfrom transformers.utils import logging, PaddingStrategy\nfrom transformers.tokenization_utils_base import EncodedInput, BatchEncoding\n\n\nclass SPTokenizer:\n    def __init__(self, model_path: str):\n        # reload tokenizer\n        assert os.path.isfile(model_path), model_path\n        self.sp_model = SentencePieceProcessor(model_file=model_path)\n\n        # BOS / EOS token IDs\n        self.n_words: int = self.sp_model.vocab_size()\n        self.bos_id: int = self.sp_model.bos_id()\n        self.eos_id: int = self.sp_model.eos_id()\n        self.pad_id: int = self.sp_model.unk_id()\n        assert self.sp_model.vocab_size() == self.sp_model.get_piece_size()\n\n        role_special_tokens = [\"<|system|>\", \"<|user|>\", \"<|assistant|>\", \"<|observation|>\"]\n        special_tokens = [\"[MASK]\", \"[gMASK]\", \"[sMASK]\", \"sop\", \"eop\"] + role_special_tokens\n        self.special_tokens = {}\n        self.index_special_tokens = {}\n        for token in special_tokens:\n            self.special_tokens[token] = self.n_words\n            self.index_special_tokens[self.n_words] = token\n            self.n_words += 1\n        self.role_special_token_expression = \"|\".join([re.escape(token) for token in role_special_tokens])\n\n    def tokenize(self, s: str, encode_special_tokens=False):\n        if encode_special_tokens:\n            last_index = 0\n            t = []\n            for match in re.finditer(self.role_special_token_expression, s):\n                if last_index < match.start():\n                    t.extend(self.sp_model.EncodeAsPieces(s[last_index:match.start()]))\n                t.append(s[match.start():match.end()])\n                last_index = match.end()\n            if last_index < len(s):\n                t.extend(self.sp_model.EncodeAsPieces(s[last_index:]))\n            return t\n        else:\n            return self.sp_model.EncodeAsPieces(s)\n\n    def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]:\n        assert type(s) is str\n        t = self.sp_model.encode(s)\n        if bos:\n            t = [self.bos_id] + t\n        if eos:\n            t = t + [self.eos_id]\n        return t\n\n    def decode(self, t: List[int]) -> str:\n        text, buffer = \"\", []\n        for token in t:\n            if token in self.index_special_tokens:\n                if buffer:\n                    text += self.sp_model.decode(buffer)\n                    buffer = []\n                text += self.index_special_tokens[token]\n            else:\n                buffer.append(token)\n        if buffer:\n            text += self.sp_model.decode(buffer)\n        return text\n\n    def decode_tokens(self, tokens: List[str]) -> str:\n        text = self.sp_model.DecodePieces(tokens)\n        return text\n\n    def convert_token_to_id(self, token):\n        \"\"\" Converts a token (str) in an id using the vocab. 
\"\"\"\n        if token in self.special_tokens:\n            return self.special_tokens[token]\n        return self.sp_model.PieceToId(token)\n\n    def convert_id_to_token(self, index):\n        \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n        if index in self.index_special_tokens:\n            return self.index_special_tokens[index]\n        if index in [self.eos_id, self.bos_id, self.pad_id] or index < 0 or index > self.sp_model.vocab_size():\n            return \"\"\n        return self.sp_model.IdToPiece(index)\n\n\nclass ChatGLMTokenizer(PreTrainedTokenizer):\n    vocab_files_names = {\"vocab_file\": \"tokenizer.model\"}\n\n    model_input_names = [\"input_ids\", \"attention_mask\", \"position_ids\"]\n\n    def __init__(self, vocab_file, padding_side=\"left\", clean_up_tokenization_spaces=False, encode_special_tokens=False,\n                 **kwargs):\n        self.name = \"GLMTokenizer\"\n\n        self.vocab_file = vocab_file\n        self.tokenizer = SPTokenizer(vocab_file)\n        self.special_tokens = {\n            \"<bos>\": self.tokenizer.bos_id,\n            \"<eos>\": self.tokenizer.eos_id,\n            \"<pad>\": self.tokenizer.pad_id\n        }\n        self.encode_special_tokens = encode_special_tokens\n        super().__init__(padding_side=padding_side, clean_up_tokenization_spaces=clean_up_tokenization_spaces,\n                         encode_special_tokens=encode_special_tokens,\n                         **kwargs)\n\n    def get_command(self, token):\n        if token in self.special_tokens:\n            return self.special_tokens[token]\n        assert token in self.tokenizer.special_tokens, f\"{token} is not a special token for {self.name}\"\n        return self.tokenizer.special_tokens[token]\n\n    @property\n    def unk_token(self) -> str:\n        return \"<unk>\"\n\n    @property\n    def pad_token(self) -> str:\n        return \"<unk>\"\n\n    @property\n    def pad_token_id(self):\n        return self.get_command(\"<pad>\")\n\n    @property\n    def eos_token(self) -> str:\n        return \"</s>\"\n\n    @property\n    def eos_token_id(self):\n        return self.get_command(\"<eos>\")\n\n    @property\n    def vocab_size(self):\n        return self.tokenizer.n_words\n\n    def get_vocab(self):\n        \"\"\" Returns vocab as a dict \"\"\"\n        vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}\n        vocab.update(self.added_tokens_encoder)\n        return vocab\n\n    def _tokenize(self, text, **kwargs):\n        return self.tokenizer.tokenize(text, encode_special_tokens=self.encode_special_tokens)\n\n    def _convert_token_to_id(self, token):\n        \"\"\" Converts a token (str) in an id using the vocab. 
\"\"\"\n        return self.tokenizer.convert_token_to_id(token)\n\n    def _convert_id_to_token(self, index):\n        \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n        return self.tokenizer.convert_id_to_token(index)\n\n    def convert_tokens_to_string(self, tokens: List[str]) -> str:\n        return self.tokenizer.decode_tokens(tokens)\n\n    def save_vocabulary(self, save_directory, filename_prefix=None):\n        \"\"\"\n        Save the vocabulary and special tokens file to a directory.\n\n        Args:\n            save_directory (`str`):\n                The directory in which to save the vocabulary.\n            filename_prefix (`str`, *optional*):\n                An optional prefix to add to the named of the saved files.\n\n        Returns:\n            `Tuple(str)`: Paths to the files saved.\n        \"\"\"\n        if os.path.isdir(save_directory):\n            vocab_file = os.path.join(\n                save_directory, self.vocab_files_names[\"vocab_file\"]\n            )\n        else:\n            vocab_file = save_directory\n\n        with open(self.vocab_file, 'rb') as fin:\n            proto_str = fin.read()\n\n        with open(vocab_file, \"wb\") as writer:\n            writer.write(proto_str)\n\n        return (vocab_file,)\n\n    def get_prefix_tokens(self):\n        prefix_tokens = [self.get_command(\"[gMASK]\"), self.get_command(\"sop\")]\n        return prefix_tokens\n\n    def build_single_message(self, role, metadata, message):\n        assert role in [\"system\", \"user\", \"assistant\", \"observation\"], role\n        role_tokens = [self.get_command(f\"<|{role}|>\")] + self.tokenizer.encode(f\"{metadata}\\n\")\n        message_tokens = self.tokenizer.encode(message)\n        tokens = role_tokens + message_tokens\n        return tokens\n\n    def build_chat_input(self, query, history=None, role=\"user\"):\n        if history is None:\n            history = []\n        input_ids = []\n        for item in history:\n            content = item[\"content\"]\n            if item[\"role\"] == \"system\" and \"tools\" in item:\n                content = content + \"\\n\" + json.dumps(item[\"tools\"], indent=4, ensure_ascii=False)\n            input_ids.extend(self.build_single_message(item[\"role\"], item.get(\"metadata\", \"\"), content))\n        input_ids.extend(self.build_single_message(role, \"\", query))\n        input_ids.extend([self.get_command(\"<|assistant|>\")])\n        return self.batch_encode_plus([input_ids], return_tensors=\"pt\", is_split_into_words=True)\n\n    def build_inputs_with_special_tokens(\n            self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None\n    ) -> List[int]:\n        \"\"\"\n        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\n        adding special tokens. 
A ChatGLM sequence has the following format:\n\n        - single sequence: `[gMASK] sop X`\n        - pair of sequences: `[gMASK] sop A B </s>`\n\n        Args:\n            token_ids_0 (`List[int]`):\n                List of IDs to which the special tokens will be added.\n            token_ids_1 (`List[int]`, *optional*):\n                Optional second list of IDs for sequence pairs.\n\n        Returns:\n            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.\n        \"\"\"\n        prefix_tokens = self.get_prefix_tokens()\n        token_ids_0 = prefix_tokens + token_ids_0\n        if token_ids_1 is not None:\n            token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command(\"<eos>\")]\n        return token_ids_0\n\n    def _pad(\n            self,\n            encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],\n            max_length: Optional[int] = None,\n            padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,\n            pad_to_multiple_of: Optional[int] = None,\n            return_attention_mask: Optional[bool] = None,\n    ) -> dict:\n        \"\"\"\n        Pad encoded inputs (on the left, up to a predefined length or the max length in the batch).\n\n        Args:\n            encoded_inputs:\n                Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).\n            max_length: maximum length of the returned list and optionally padding length (see below).\n                Will truncate by taking into account the special tokens.\n            padding_strategy: PaddingStrategy to use for padding.\n\n                - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch\n                - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)\n                - PaddingStrategy.DO_NOT_PAD: Do not pad\n                The tokenizer padding sides are defined in self.padding_side:\n\n                    - 'left': pads on the left of the sequences\n                    - 'right': pads on the right of the sequences\n            pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.\n                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability\n                `>= 7.5` (Volta).\n            return_attention_mask:\n                (optional) Set to False to avoid returning attention mask (default: set to model specifics)\n        \"\"\"\n        # This tokenizer only supports left padding.\n        assert self.padding_side == \"left\"\n\n        required_input = encoded_inputs[self.model_input_names[0]]\n        seq_length = len(required_input)\n\n        if padding_strategy == PaddingStrategy.LONGEST:\n            max_length = len(required_input)\n\n        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):\n            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of\n\n        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length\n\n        # Initialize attention mask if not present.\n        if \"attention_mask\" not in encoded_inputs:\n            encoded_inputs[\"attention_mask\"] = [1] * seq_length\n\n        if \"position_ids\" not in encoded_inputs:\n            encoded_inputs[\"position_ids\"] = list(range(seq_length))\n\n        if needs_to_be_padded:\n            difference = max_length - len(required_input)\n\n   
         if \"attention_mask\" in encoded_inputs:\n                encoded_inputs[\"attention_mask\"] = [0] * difference + encoded_inputs[\"attention_mask\"]\n            if \"position_ids\" in encoded_inputs:\n                encoded_inputs[\"position_ids\"] = [0] * difference + encoded_inputs[\"position_ids\"]\n            encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input\n\n        return encoded_inputs\n"
  },
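  {
    "path": "examples/left_pad_demo.py",
    "content": "\"\"\"Standalone sketch of the left-padding scheme implemented by ChatGLMTokenizer._pad in\nchatglm3/tokenization_chatglm.py: pad tokens are prepended to input_ids, zeros to the\nattention mask, and zeros to position_ids. Plain Python with no transformers dependency;\nthe pad id 0 below is a placeholder, not the real ChatGLM pad id.\n\"\"\"\n\n\ndef left_pad(input_ids, max_length, pad_token_id=0):\n    seq_length = len(input_ids)\n    attention_mask = [1] * seq_length\n    position_ids = list(range(seq_length))\n    difference = max_length - seq_length\n    return {\n        'input_ids': [pad_token_id] * difference + input_ids,\n        'attention_mask': [0] * difference + attention_mask,\n        'position_ids': [0] * difference + position_ids,\n    }\n\n\nprint(left_pad([101, 102, 103], max_length=6))\n# {'input_ids': [0, 0, 0, 101, 102, 103], 'attention_mask': [0, 0, 0, 1, 1, 1], 'position_ids': [0, 0, 0, 0, 1, 2]}\n"
  },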
  {
    "path": "check_bad_cache_files.py",
    "content": "import os\nimport hashlib\n\n# 指定要检查的目录路径\ndirectory_path = os.path.expanduser('~/.cache/huggingface/hub')\n\n# 遍历目录树\nfor root, dirs, files in os.walk(directory_path):\n    for dir_name in dirs:\n        # 检查每个子目录是否名为\"blobs\"\n        if dir_name == \"blobs\":\n            # 如果是，获取该目录的绝对路径\n            blobs_dir_path = os.path.join(root, dir_name)\n            # 遍历blobs目录下的所有文件\n            for file_name in os.listdir(blobs_dir_path):\n                file_path = os.path.join(blobs_dir_path, file_name)\n\n                # 获取文件大小，以字节为单位\n                file_size = os.path.getsize(file_path)\n\n                # 将文件大小转换为MB，并保留两位小数\n                file_size_mb = round(file_size / (1024 * 1024), 2)\n\n                # 判断文件大小是否大于100MB\n                if file_size_mb < 100:\n                    continue\n\n                # 初始化哈希对象\n                hash_obj = hashlib.sha256()\n                # 以二进制模式打开文件\n                with open(file_path, 'rb') as f:\n                    # 逐块读取文件并更新哈希对象\n                    while True:\n                        chunk = f.read(1024 * 1024)\n                        if not chunk:\n                            break\n                        hash_obj.update(chunk)\n                # 获取文件的sha256哈希值\n                file_hash = hash_obj.hexdigest()\n                # 比较文件名和哈希值\n                if file_name != file_hash:\n                    print(f\"Filename and hash mismatch: {file_path}, {file_hash}\")\n"
  },
  {
    "path": "download_model.py",
    "content": "import os\nos.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'\n\nimport traceback\nfrom glob import glob\nfrom huggingface_hub import snapshot_download\n\nmodel_name_list = [\n    # 'THUDM/chatglm-6b-int4-qe',\n    # 'THUDM/chatglm-6b-int4',\n    # 'THUDM/chatglm-6b',\n    # 'THUDM/glm-10b-chinese',\n    #\n    # 'THUDM/chatglm2-6b',\n    # 'THUDM/chatglm2-6b-int4',\n    #\n    # 'THUDM/chatglm3-6b',\n    # 'THUDM/chatglm3-6b-128k',\n\n    'THUDM/glm-4-9b-chat-1m',\n\n    # 'silver/chatglm-6b-slim',\n    # 'silver/chatglm-6b-int4-slim',\n    # 'silver/chatglm-6b-int4-qe-slim',\n]\n\nfor model_name in model_name_list:\n    dst_path = f'models/{model_name}'\n    if glob(f'{dst_path}/*.bin') or glob(f'{dst_path}/*.pt'):\n        print(f'{model_name} already downloaded')\n        continue\n    retry_times = 10\n    while retry_times > 0:\n        try:\n            print(f'Downloading {model_name}')\n            snapshot_download(\n                repo_id=model_name,\n                max_workers=2,\n                # proxies={'https': 'http://127.0.0.1:7890'}\n            )\n            snapshot_download(\n                repo_id=model_name,\n                local_dir=dst_path,\n                local_dir_use_symlinks=False,\n            )\n            break\n        except:\n            traceback.print_exc()\n            retry_times -= 1\n            print(f'Retry download {model_name}, {retry_times} times left...')\n    print(f'{model_name} downloaded')\n"
  },
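  {
    "path": "examples/load_local_model.py",
    "content": "# Illustrative sketch, not part of the original repo: the file name is\n# hypothetical. It shows one way to load the snapshot that download_model.py\n# materializes under models/, using the standard transformers API.\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\n\nmodel_path = 'models/THUDM/glm-4-9b-chat-1m'\n\n# trust_remote_code is required because the checkpoint ships its own\n# tokenization_chatglm.py / modeling_chatglm.py.\ntokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)\nmodel = AutoModelForCausalLM.from_pretrained(\n    model_path,\n    torch_dtype=torch.bfloat16,\n    device_map='auto',\n    trust_remote_code=True,\n).eval()\n\n# chat() is defined in glm4/modeling_chatglm.py.\nresponse, history = model.chat(tokenizer, 'Hello', history=[])\nprint(response)\n"
  },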
  {
    "path": "env_offline.bat",
    "content": "@echo off\n\necho Activate offline environment\n\nset DIR=%~dp0system\n\nset PATH=C:\\Windows\\system32;C:\\Windows;%DIR%\\git\\bin;%DIR%\\python;%DIR%\\python\\Scripts;%DIR%\\python\\Lib\\site-packages\\torch\\lib\nset PY_LIBS=%DIR%\\python\\Scripts\\Lib;%DIR%\\python\\Scripts\\Lib\\site-packages\nset PY_PIP=%DIR%\\python\\Scripts\nset SKIP_VENV=1\nset PIP_INSTALLER_LOCATION=%DIR%\\python\\get-pip.py\n"
  },
  {
    "path": "env_venv.bat",
    "content": "@echo off\n\nset DIR=.venv\n\ncd /D \"%~dp0\"\n\nif exist %DIR% goto :activate\necho Setup venv\npython -m venv .venv\n\n:activate\necho Activate venv\ncall .venv\\Scripts\\activate.bat\n"
  },
  {
    "path": "glm4/configuration_chatglm.py",
    "content": "from transformers import PretrainedConfig\n\n\nclass ChatGLMConfig(PretrainedConfig):\n    model_type = \"chatglm\"\n\n    def __init__(\n            self,\n            num_layers=28,\n            padded_vocab_size=65024,\n            hidden_size=4096,\n            ffn_hidden_size=13696,\n            kv_channels=128,\n            num_attention_heads=32,\n            seq_length=2048,\n            hidden_dropout=0.0,\n            classifier_dropout=None,\n            attention_dropout=0.0,\n            layernorm_epsilon=1e-5,\n            rmsnorm=True,\n            apply_residual_connection_post_layernorm=False,\n            post_layer_norm=True,\n            add_bias_linear=False,\n            add_qkv_bias=False,\n            bias_dropout_fusion=True,\n            multi_query_attention=False,\n            multi_query_group_num=1,\n            rope_ratio=1,\n            apply_query_key_layer_scaling=True,\n            attention_softmax_in_fp32=True,\n            fp32_residual_connection=False,\n            **kwargs\n    ):\n        self.num_layers = num_layers\n        self.vocab_size = padded_vocab_size\n        self.padded_vocab_size = padded_vocab_size\n        self.hidden_size = hidden_size\n        self.ffn_hidden_size = ffn_hidden_size\n        self.kv_channels = kv_channels\n        self.num_attention_heads = num_attention_heads\n        self.seq_length = seq_length\n        self.hidden_dropout = hidden_dropout\n        self.classifier_dropout = classifier_dropout\n        self.attention_dropout = attention_dropout\n        self.layernorm_epsilon = layernorm_epsilon\n        self.rmsnorm = rmsnorm\n        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm\n        self.post_layer_norm = post_layer_norm\n        self.add_bias_linear = add_bias_linear\n        self.add_qkv_bias = add_qkv_bias\n        self.bias_dropout_fusion = bias_dropout_fusion\n        self.multi_query_attention = multi_query_attention\n        self.multi_query_group_num = multi_query_group_num\n        self.rope_ratio = rope_ratio\n        self.apply_query_key_layer_scaling = apply_query_key_layer_scaling\n        self.attention_softmax_in_fp32 = attention_softmax_in_fp32\n        self.fp32_residual_connection = fp32_residual_connection\n        super().__init__(**kwargs)\n"
  },
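  {
    "path": "examples/config_demo.py",
    "content": "# Illustrative sketch, not part of the original repo: the file name is\n# hypothetical. It shows how ChatGLMConfig maps its constructor arguments onto\n# attributes; note that `padded_vocab_size` is also exposed as `vocab_size`.\nfrom glm4.configuration_chatglm import ChatGLMConfig\n\nconfig = ChatGLMConfig(\n    num_layers=2,          # tiny values, for demonstration only\n    padded_vocab_size=1024,\n    hidden_size=64,\n    num_attention_heads=4,\n    kv_channels=16,\n)\nprint(config.model_type)             # 'chatglm'\nprint(config.vocab_size)             # 1024 (mirrors padded_vocab_size)\nprint(config.multi_query_attention)  # False by default\n"
  },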
  {
    "path": "glm4/modeling_chatglm.py",
    "content": "\"\"\" PyTorch ChatGLM model. \"\"\"\nimport json\nimport math\nimport copy\nimport warnings\nimport re\nimport sys\n\nimport torch\nimport torch.utils.checkpoint\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss, LayerNorm, MSELoss, BCEWithLogitsLoss\nfrom torch.nn.utils import skip_init\nfrom typing import Optional, Tuple, Union, List, Callable, Dict, Any\nfrom copy import deepcopy\n\nfrom transformers.modeling_outputs import (\n    BaseModelOutputWithPast,\n    CausalLMOutputWithPast,\n    SequenceClassifierOutputWithPast,\n)\nfrom transformers.modeling_utils import PreTrainedModel\nfrom transformers.utils import logging, is_torch_npu_available\nfrom transformers.generation.logits_process import LogitsProcessor\nfrom transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig, ModelOutput\n\nfrom .configuration_chatglm import ChatGLMConfig\n\ntry:\n    from transformers.utils import is_flash_attn_greater_or_equal_2_10, is_flash_attn_2_available\n    if is_flash_attn_2_available():\n        from flash_attn import flash_attn_func, flash_attn_varlen_func\n        from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa\nexcept:\n    pass\n\n\n# flags required to enable jit fusion kernels\n\nif sys.platform != 'darwin' and not is_torch_npu_available():\n    torch._C._jit_set_profiling_mode(False)\n    torch._C._jit_set_profiling_executor(False)\n    torch._C._jit_override_can_fuse_on_cpu(True)\n    torch._C._jit_override_can_fuse_on_gpu(True)\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"THUDM/ChatGLM\"\n_CONFIG_FOR_DOC = \"ChatGLMConfig\"\n\n\ndef default_init(cls, *args, **kwargs):\n    return cls(*args, **kwargs)\n\n\nclass InvalidScoreLogitsProcessor(LogitsProcessor):\n    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:\n        if torch.isnan(scores).any() or torch.isinf(scores).any():\n            scores.zero_()\n            scores[..., 198] = 5e4\n        return scores\n\n\ndef split_tensor_along_last_dim(\n        tensor: torch.Tensor,\n        num_partitions: int,\n        contiguous_split_chunks: bool = False,\n) -> List[torch.Tensor]:\n    \"\"\"Split a tensor along its last dimension.\n\n    Arguments:\n        tensor: input tensor.\n        num_partitions: number of partitions to split the tensor\n        contiguous_split_chunks: If True, make each chunk contiguous\n                                 in memory.\n\n    Returns:\n        A list of Tensors\n    \"\"\"\n    # Get the size and dimension.\n    last_dim = tensor.dim() - 1\n    last_dim_size = tensor.size()[last_dim] // num_partitions\n    # Split.\n    tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)\n    # Note: torch.split does not create contiguous tensors by default.\n    if contiguous_split_chunks:\n        return tuple(chunk.contiguous() for chunk in tensor_list)\n\n    return tensor_list\n\n\nclass RotaryEmbedding(nn.Module):\n    def __init__(self, dim, rope_ratio=1, original_impl=False, device=None, dtype=None):\n        super().__init__()\n        inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, device=device).to(dtype=dtype) / dim))\n        self.register_buffer(\"inv_freq\", inv_freq)\n        self.dim = dim\n        self.original_impl = original_impl\n        self.rope_ratio = rope_ratio\n\n    def forward_impl(\n            self, seq_len: int, n_elem: int, dtype: torch.dtype, device: torch.device, 
base: int = 10000\n    ):\n        \"\"\"Enhanced Transformer with Rotary Position Embedding.\n\n        Derived from: https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/\n        transformers/rope/__init__.py. MIT License:\n        https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/license.\n        \"\"\"\n        # $\\Theta = {\\theta_i = 10000^{\\frac{2(i-1)}{d}}, i \\in [1, 2, ..., \\frac{d}{2}]}$\n        base = base * self.rope_ratio\n        theta = 1.0 / (base ** (torch.arange(0, n_elem, 2, dtype=torch.float, device=device) / n_elem))\n\n        # Create position indexes `[0, 1, ..., seq_len - 1]`\n        seq_idx = torch.arange(seq_len, dtype=torch.float, device=device)\n\n        # Calculate the product of position index and $\\theta_i$\n        idx_theta = torch.outer(seq_idx, theta).float()\n\n        cache = torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1)\n\n        # this is to mimic the behaviour of complex32, else we will get different results\n        if dtype in (torch.float16, torch.bfloat16, torch.int8):\n            cache = cache.bfloat16() if dtype == torch.bfloat16 else cache.half()\n        return cache\n\n    def forward(self, max_seq_len, offset=0):\n        return self.forward_impl(\n            max_seq_len, self.dim, dtype=self.inv_freq.dtype, device=self.inv_freq.device\n        )\n\n\n@torch.jit.script\ndef apply_rotary_pos_emb(x: torch.Tensor, rope_cache: torch.Tensor) -> torch.Tensor:\n    # x: [b, np, sq, hn]\n    b, np, sq, hn = x.size(0), x.size(1), x.size(2), x.size(3)\n    rot_dim = rope_cache.shape[-2] * 2\n    x, x_pass = x[..., :rot_dim], x[..., rot_dim:]\n    # truncate to support variable sizes\n    rope_cache = rope_cache[:, :sq]\n    xshaped = x.reshape(b, np, sq, rot_dim // 2, 2)\n    rope_cache = rope_cache.view(-1, 1, sq, xshaped.size(3), 2)\n    x_out2 = torch.stack(\n        [\n            xshaped[..., 0] * rope_cache[..., 0] - xshaped[..., 1] * rope_cache[..., 1],\n            xshaped[..., 1] * rope_cache[..., 0] + xshaped[..., 0] * rope_cache[..., 1],\n        ],\n        -1,\n    )\n    x_out2 = x_out2.flatten(3)\n    return torch.cat((x_out2, x_pass), dim=-1)\n\n\nclass RMSNorm(torch.nn.Module):\n    def __init__(self, normalized_shape, eps=1e-5, device=None, dtype=None, **kwargs):\n        super().__init__()\n        self.weight = torch.nn.Parameter(torch.empty(normalized_shape, device=device, dtype=dtype))\n        self.eps = eps\n\n    def forward(self, hidden_states: torch.Tensor):\n        input_dtype = hidden_states.dtype\n        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)\n        hidden_states = hidden_states * torch.rsqrt(variance + self.eps)\n\n        return (self.weight * hidden_states).to(input_dtype)\n\n\nclass CoreAttention(torch.nn.Module):\n    def __init__(self, config: ChatGLMConfig, layer_number):\n        super(CoreAttention, self).__init__()\n        self.config = config\n        self.apply_query_key_layer_scaling = config.apply_query_key_layer_scaling\n        self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32\n        if self.apply_query_key_layer_scaling:\n            self.attention_softmax_in_fp32 = True\n        self.layer_number = max(1, layer_number)\n        self.is_causal = True\n\n        projection_size = config.kv_channels * config.num_attention_heads\n\n        # Per attention head and per partition values.\n        self.hidden_size_per_partition = 
projection_size\n        self.hidden_size_per_attention_head = projection_size // config.num_attention_heads\n        self.num_attention_heads_per_partition = config.num_attention_heads\n\n        coeff = None\n        self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)\n        if self.apply_query_key_layer_scaling:\n            coeff = self.layer_number\n            self.norm_factor *= coeff\n        self.coeff = coeff\n\n        self.attention_dropout = torch.nn.Dropout(config.attention_dropout)\n\n    def forward(self, query_layer, key_layer, value_layer, attention_mask):\n        # [b, np, sq, sk]\n        output_size = (query_layer.size(0), query_layer.size(1), query_layer.size(2), key_layer.size(2))\n\n        # [b, np, sq, hn] -> [b * np, sq, hn]\n        query_layer = query_layer.view(output_size[0] * output_size[1], output_size[2], -1)\n        # [b, np, sk, hn] -> [b * np, sk, hn]\n        key_layer = key_layer.view(output_size[0] * output_size[1], output_size[3], -1)\n\n        # preallocating input tensor: [b * np, sq, sk]\n        matmul_input_buffer = torch.empty(\n            output_size[0] * output_size[1], output_size[2], output_size[3], dtype=query_layer.dtype,\n            device=query_layer.device\n        )\n\n        # Raw attention scores. [b * np, sq, sk]\n        matmul_result = torch.baddbmm(\n            matmul_input_buffer,\n            query_layer,  # [b * np, sq, hn]\n            key_layer.transpose(1, 2),  # [b * np, hn, sk]\n            beta=0.0,\n            alpha=(1.0 / self.norm_factor),\n        )\n\n        # change view to [b, np, sq, sk]\n        attention_scores = matmul_result.view(*output_size)\n\n        # ===========================\n        # Attention probs and dropout\n        # ===========================\n\n        # attention scores and attention mask [b, np, sq, sk]\n        if self.attention_softmax_in_fp32:\n            attention_scores = attention_scores.float()\n        if self.coeff is not None:\n            attention_scores = attention_scores * self.coeff\n        if attention_mask is None and attention_scores.shape[2] == attention_scores.shape[3]:\n            attention_mask = torch.ones(output_size[0], 1, output_size[2], output_size[3],\n                                        device=attention_scores.device, dtype=torch.bool)\n            attention_mask.tril_()\n            attention_mask = ~attention_mask\n        if attention_mask is not None:\n            attention_scores = attention_scores.masked_fill(attention_mask, float(\"-inf\"))\n        attention_probs = F.softmax(attention_scores, dim=-1)\n        attention_probs = attention_probs.type_as(value_layer)\n\n        # This is actually dropping out entire tokens to attend to, which might\n        # seem a bit unusual, but is taken from the original Transformer paper.\n        attention_probs = self.attention_dropout(attention_probs)\n\n        # query layer shape: [b * np, sq, hn]\n        # value layer shape: [b, np, sk, hn]\n        # attention shape: [b, np, sq, sk]\n        # context layer shape: [b, np, sq, hn]\n        output_size = (value_layer.size(0), value_layer.size(1), query_layer.size(1), value_layer.size(3))\n        # change view [b * np, sk, hn]\n        value_layer = value_layer.view(output_size[0] * output_size[1], value_layer.size(2), -1)\n        # change view [b * np, sq, sk]\n        attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)\n        # matmul: [b * np, sq, hn]\n        context_layer = 
torch.bmm(attention_probs, value_layer)\n        # change view [b, np, sq, hn]\n        context_layer = context_layer.view(*output_size)\n        # [b, np, sq, hn] --> [b, sq, np, hn]\n        context_layer = context_layer.transpose(1, 2).contiguous()\n        # [b, sq, np, hn] --> [b, sq, hp]\n        new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)\n        context_layer = context_layer.reshape(*new_context_layer_shape)\n\n        return context_layer\n\n\nclass SdpaAttention(CoreAttention):\n    def forward(self, query_layer, key_layer, value_layer, attention_mask):\n        if attention_mask is None and query_layer.shape[2] == key_layer.shape[2]:\n            context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer,\n                                                                             is_causal=True,\n                                                                             dropout_p=self.config.attention_dropout if self.training else 0.0)\n        else:\n            if attention_mask is not None:\n                attention_mask = ~attention_mask\n            context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer,\n                                                                             attention_mask,\n                                                                             dropout_p=self.config.attention_dropout if self.training else 0.0)\n        context_layer = context_layer.transpose(1, 2).contiguous()\n        new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)\n        context_layer = context_layer.reshape(*new_context_layer_shape)\n        return context_layer\n\n\ndef _get_unpad_data(attention_mask):\n    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)\n    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()\n    max_seqlen_in_batch = seqlens_in_batch.max().item()\n    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))\n    return (\n        indices,\n        cu_seqlens,\n        max_seqlen_in_batch,\n    )\n\n\n# Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2\nclass FlashAttention2(CoreAttention):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()\n\n    def forward(self, query_states, key_states, value_states, attention_mask):\n        query_states = query_states.transpose(1, 2)\n        key_states = key_states.transpose(1, 2)\n        value_states = value_states.transpose(1, 2)\n        batch_size, query_length = query_states.shape[:2]\n        if not self._flash_attn_uses_top_left_mask:\n            causal = self.is_causal\n        else:\n            # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. 
For details, please see the comment in LlamaFlashAttention2 __init__.\n            causal = self.is_causal and query_length != 1\n        dropout = self.config.attention_dropout if self.training else 0.0\n        # Contains at least one padding token in the sequence\n        if attention_mask is not None:\n            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(\n                query_states, key_states, value_states, attention_mask, query_length\n            )\n\n            cu_seqlens_q, cu_seqlens_k = cu_seq_lens\n            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens\n\n            attn_output_unpad = flash_attn_varlen_func(\n                query_states,\n                key_states,\n                value_states,\n                cu_seqlens_q=cu_seqlens_q,\n                cu_seqlens_k=cu_seqlens_k,\n                max_seqlen_q=max_seqlen_in_batch_q,\n                max_seqlen_k=max_seqlen_in_batch_k,\n                dropout_p=dropout,\n                softmax_scale=None,\n                causal=causal,\n            )\n\n            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)\n        else:\n            attn_output = flash_attn_func(\n                query_states, key_states, value_states, dropout, softmax_scale=None, causal=causal\n            )\n        attn_output = attn_output.reshape(batch_size, query_length, self.hidden_size_per_partition).contiguous()\n        return attn_output\n\n    def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):\n        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)\n        batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape\n\n        key_layer = index_first_axis(\n            key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k\n        )\n        value_layer = index_first_axis(\n            value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k\n        )\n        if query_length == kv_seq_len:\n            query_layer = index_first_axis(\n                query_layer.reshape(batch_size * kv_seq_len, self.num_attention_heads_per_partition, head_dim), indices_k\n            )\n            cu_seqlens_q = cu_seqlens_k\n            max_seqlen_in_batch_q = max_seqlen_in_batch_k\n            indices_q = indices_k\n        elif query_length == 1:\n            max_seqlen_in_batch_q = 1\n            cu_seqlens_q = torch.arange(\n                batch_size + 1, dtype=torch.int32, device=query_layer.device\n            )  # There is a memcpy here, that is very bad.\n            indices_q = cu_seqlens_q[:-1]\n            query_layer = query_layer.squeeze(1)\n        else:\n            # The -q_len: slice assumes left padding.\n            attention_mask = attention_mask[:, -query_length:]\n            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)\n\n        return (\n            query_layer,\n            key_layer,\n            value_layer,\n            indices_q,\n            (cu_seqlens_q, cu_seqlens_k),\n            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),\n        )\n\n\nCORE_ATTENTION_CLASSES = {\n    \"eager\": CoreAttention,\n    \"sdpa\": SdpaAttention,\n    \"flash_attention_2\": FlashAttention2\n}\n\n\nclass SelfAttention(torch.nn.Module):\n    \"\"\"Parallel self-attention layer abstract class.\n\n    Self-attention layer takes 
input with size [b, sq, h]\n    and returns output of the same size.\n    \"\"\"\n\n    def __init__(self, config: ChatGLMConfig, layer_number, device=None):\n        super(SelfAttention, self).__init__()\n        self.layer_number = max(1, layer_number)\n\n        self.projection_size = config.kv_channels * config.num_attention_heads\n\n        # Per attention head and per partition values.\n        self.hidden_size_per_attention_head = self.projection_size // config.num_attention_heads\n        self.num_attention_heads_per_partition = config.num_attention_heads\n\n        self.multi_query_attention = config.multi_query_attention\n        self.qkv_hidden_size = 3 * self.projection_size\n        if self.multi_query_attention:\n            self.num_multi_query_groups_per_partition = config.multi_query_group_num\n            self.qkv_hidden_size = (\n                    self.projection_size + 2 * self.hidden_size_per_attention_head * config.multi_query_group_num\n            )\n        self.query_key_value = nn.Linear(config.hidden_size, self.qkv_hidden_size,\n                                         bias=config.add_bias_linear or config.add_qkv_bias,\n                                         device=device, **_config_to_kwargs(config)\n                                         )\n\n        self.core_attention = CORE_ATTENTION_CLASSES[config._attn_implementation](config, self.layer_number)\n\n        # Output.\n        self.dense = nn.Linear(self.projection_size, config.hidden_size, bias=config.add_bias_linear,\n                               device=device, **_config_to_kwargs(config)\n                               )\n\n    def _allocate_memory(self, inference_max_sequence_len, batch_size, device=None, dtype=None):\n        if self.multi_query_attention:\n            num_attention_heads = self.num_multi_query_groups_per_partition\n        else:\n            num_attention_heads = self.num_attention_heads_per_partition\n        return torch.empty(\n            inference_max_sequence_len,\n            batch_size,\n            num_attention_heads,\n            self.hidden_size_per_attention_head,\n            dtype=dtype,\n            device=device,\n        )\n\n    def forward(\n            self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True\n    ):\n        # hidden_states: [b, sq, h]\n\n        # =================================================\n        # Pre-allocate memory for key-values for inference.\n        # =================================================\n        # =====================\n        # Query, Key, and Value\n        # =====================\n\n        # Attention heads [b, sq, h] --> [b, sq, (np * 3 * hn)]\n        mixed_x_layer = self.query_key_value(hidden_states)\n\n        if self.multi_query_attention:\n            (query_layer, key_layer, value_layer) = mixed_x_layer.split(\n                [\n                    self.num_attention_heads_per_partition * self.hidden_size_per_attention_head,\n                    self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head,\n                    self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head,\n                ],\n                dim=-1,\n            )\n            query_layer = query_layer.view(\n                query_layer.size()[:-1] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head)\n            )\n            key_layer = key_layer.view(\n                key_layer.size()[:-1] + 
(self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)\n            )\n            value_layer = value_layer.view(\n                value_layer.size()[:-1]\n                + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)\n            )\n        else:\n            new_tensor_shape = mixed_x_layer.size()[:-1] + \\\n                               (self.num_attention_heads_per_partition,\n                                3 * self.hidden_size_per_attention_head)\n            mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)\n\n            # [b, sq, np, 3 * hn] --> 3 [b, sq, np, hn]\n            (query_layer, key_layer, value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3)\n\n        # [b, sq, np, hn] -> [b, np, sq, hn]\n        query_layer, key_layer, value_layer = [k.transpose(1, 2) for k in [query_layer, key_layer, value_layer]]\n\n        # apply relative positional encoding (rotary embedding)\n        if rotary_pos_emb is not None:\n            query_layer = apply_rotary_pos_emb(query_layer, rotary_pos_emb)\n            key_layer = apply_rotary_pos_emb(key_layer, rotary_pos_emb)\n\n        # adjust key and value for inference\n        if kv_cache is not None:\n            cache_k, cache_v = kv_cache\n            key_layer = torch.cat((cache_k, key_layer), dim=2)\n            value_layer = torch.cat((cache_v, value_layer), dim=2)\n        if use_cache:\n            if kv_cache is None:\n                kv_cache = torch.cat((key_layer.unsqueeze(0).unsqueeze(0), value_layer.unsqueeze(0).unsqueeze(0)),\n                                     dim=1)\n            else:\n                kv_cache = (key_layer, value_layer)\n        else:\n            kv_cache = None\n\n        if self.multi_query_attention:\n            key_layer = key_layer.unsqueeze(2)\n            key_layer = key_layer.expand(\n                -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1, -1\n            )\n            key_layer = key_layer.contiguous().view(\n                key_layer.size()[:1] + (self.num_attention_heads_per_partition,) + key_layer.size()[3:]\n            )\n            value_layer = value_layer.unsqueeze(2)\n            value_layer = value_layer.expand(\n                -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1, -1\n            )\n            value_layer = value_layer.contiguous().view(\n                value_layer.size()[:1] + (self.num_attention_heads_per_partition,) + value_layer.size()[3:]\n            )\n\n        # ==================================\n        # core attention computation\n        # ==================================\n\n        context_layer = self.core_attention(query_layer, key_layer, value_layer, attention_mask)\n\n        # =================\n        # Output. 
[b, sq, h]\n        # =================\n\n        output = self.dense(context_layer)\n\n        return output, kv_cache\n\n\ndef _config_to_kwargs(args):\n    common_kwargs = {\n        \"dtype\": args.torch_dtype,\n    }\n    return common_kwargs\n\n\nclass MLP(torch.nn.Module):\n    \"\"\"MLP.\n\n    MLP will take the input with h hidden state, project it to 4*h\n    hidden dimension, perform nonlinear transformation, and project the\n    state back into h hidden dimension.\n    \"\"\"\n\n    def __init__(self, config: ChatGLMConfig, device=None):\n        super(MLP, self).__init__()\n\n        self.add_bias = config.add_bias_linear\n\n        # Project to 4h. If using swiglu, double the output width, see https://arxiv.org/pdf/2002.05202.pdf\n        self.dense_h_to_4h = nn.Linear(\n            config.hidden_size,\n            config.ffn_hidden_size * 2,\n            bias=self.add_bias,\n            device=device,\n            **_config_to_kwargs(config)\n        )\n\n        def swiglu(x):\n            x = torch.chunk(x, 2, dim=-1)\n            return F.silu(x[0]) * x[1]\n\n        self.activation_func = swiglu\n\n        # Project back to h.\n        self.dense_4h_to_h = nn.Linear(\n            config.ffn_hidden_size,\n            config.hidden_size,\n            bias=self.add_bias,\n            device=device,\n            **_config_to_kwargs(config)\n        )\n\n    def forward(self, hidden_states):\n        # [b, sq, 4hp]\n        intermediate_parallel = self.dense_h_to_4h(hidden_states)\n        intermediate_parallel = self.activation_func(intermediate_parallel)\n        # [b, sq, h]\n        output = self.dense_4h_to_h(intermediate_parallel)\n        return output\n\n\nclass GLMBlock(torch.nn.Module):\n    \"\"\"A single transformer layer.\n\n    Transformer layer takes input with size [b, sq, h] and returns an\n    output of the same size.\n    \"\"\"\n\n    def __init__(self, config: ChatGLMConfig, layer_number, device=None):\n        super(GLMBlock, self).__init__()\n        self.layer_number = layer_number\n\n        self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm\n\n        self.fp32_residual_connection = config.fp32_residual_connection\n\n        LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm\n        # Layernorm on the input data.\n        self.input_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,\n                                             dtype=config.torch_dtype)\n\n        # Self attention.\n        self.self_attention = SelfAttention(config, layer_number, device=device)\n        self.hidden_dropout = config.hidden_dropout\n\n        # Layernorm on the attention output\n        self.post_attention_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,\n                                                      dtype=config.torch_dtype)\n\n        # MLP\n        self.mlp = MLP(config, device=device)\n\n    def forward(\n            self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True,\n    ):\n        # hidden_states: [b, sq, h]\n\n        # Layer norm at the beginning of the transformer layer.\n        layernorm_output = self.input_layernorm(hidden_states)\n        # Self attention.\n        attention_output, kv_cache = self.self_attention(\n            layernorm_output,\n            attention_mask,\n            rotary_pos_emb,\n            kv_cache=kv_cache,\n            use_cache=use_cache\n        )\n\n        
# Residual connection.\n        if self.apply_residual_connection_post_layernorm:\n            residual = layernorm_output\n        else:\n            residual = hidden_states\n\n        layernorm_input = torch.nn.functional.dropout(attention_output, p=self.hidden_dropout, training=self.training)\n        layernorm_input = residual + layernorm_input\n\n        # Layer norm post the self attention.\n        layernorm_output = self.post_attention_layernorm(layernorm_input)\n\n        # MLP.\n        mlp_output = self.mlp(layernorm_output)\n\n        # Second residual connection.\n        if self.apply_residual_connection_post_layernorm:\n            residual = layernorm_output\n        else:\n            residual = layernorm_input\n\n        output = torch.nn.functional.dropout(mlp_output, p=self.hidden_dropout, training=self.training)\n        output = residual + output\n\n        return output, kv_cache\n\n\nclass GLMTransformer(torch.nn.Module):\n    \"\"\"Transformer class.\"\"\"\n\n    def __init__(self, config: ChatGLMConfig, device=None):\n        super(GLMTransformer, self).__init__()\n\n        self.fp32_residual_connection = config.fp32_residual_connection\n        self.post_layer_norm = config.post_layer_norm\n\n        # Number of layers.\n        self.num_layers = config.num_layers\n\n        # Transformer layers.\n        def build_layer(layer_number):\n            return GLMBlock(config, layer_number, device=device)\n\n        self.layers = torch.nn.ModuleList([build_layer(i + 1) for i in range(self.num_layers)])\n\n        if self.post_layer_norm:\n            LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm\n            # Final layer norm before output.\n            self.final_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,\n                                                 dtype=config.torch_dtype)\n\n        self.gradient_checkpointing = False\n\n    def _get_layer(self, layer_number):\n        return self.layers[layer_number]\n\n    def forward(\n            self, hidden_states, attention_mask, rotary_pos_emb, kv_caches=None,\n            use_cache: Optional[bool] = True,\n            output_hidden_states: Optional[bool] = False,\n    ):\n        if not kv_caches:\n            kv_caches = [None for _ in range(self.num_layers)]\n        presents = () if use_cache else None\n        if self.gradient_checkpointing and self.training:\n            if use_cache:\n                logger.warning_once(\n                    \"`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...\"\n                )\n                use_cache = False\n\n        all_self_attentions = None\n        all_hidden_states = () if output_hidden_states else None\n        for index in range(self.num_layers):\n            if output_hidden_states:\n                all_hidden_states = all_hidden_states + (hidden_states,)\n\n            layer = self._get_layer(index)\n            if self.gradient_checkpointing and self.training:\n                layer_ret = torch.utils.checkpoint.checkpoint(\n                    layer,\n                    hidden_states,\n                    attention_mask,\n                    rotary_pos_emb,\n                    kv_caches[index],\n                    use_cache,\n                    use_reentrant=False\n                )\n            else:\n                layer_ret = layer(\n                    hidden_states,\n                    attention_mask,\n                    rotary_pos_emb,\n                    kv_cache=kv_caches[index],\n                    use_cache=use_cache\n                )\n            hidden_states, kv_cache = layer_ret\n            if use_cache:\n                # token by token decoding, use tuple format\n                if kv_caches[0] is not None:\n                    presents = presents + (kv_cache,)\n                # prefilling in decoding, use tensor format to save cuda memory\n                else:\n                    if len(presents) == 0:\n                        presents = kv_cache\n                    else:\n                        presents = torch.cat((presents, kv_cache.to(presents.device)), dim=0)\n\n        if output_hidden_states:\n            all_hidden_states = all_hidden_states + (hidden_states,)\n\n        # Final layer norm.\n        if self.post_layer_norm:\n            hidden_states = self.final_layernorm(hidden_states)\n\n        return hidden_states, presents, all_hidden_states, all_self_attentions\n\n\nclass ChatGLMPreTrainedModel(PreTrainedModel):\n    \"\"\"\n    An abstract class to handle weights initialization and\n    a simple interface for downloading and loading pretrained models.\n    \"\"\"\n\n    is_parallelizable = False\n    supports_gradient_checkpointing = True\n    config_class = ChatGLMConfig\n    base_model_prefix = \"transformer\"\n    _no_split_modules = [\"GLMBlock\"]\n    _supports_flash_attn_2 = True\n    _supports_sdpa = True\n\n    def _init_weights(self, module: nn.Module):\n        \"\"\"Initialize the weights.\"\"\"\n        return\n\n    def get_masks(self, input_ids, past_key_values, padding_mask=None):\n        if self.config._attn_implementation == \"flash_attention_2\":\n            if padding_mask is not None and not padding_mask.all():\n                return padding_mask\n            return None\n        batch_size, seq_length = input_ids.shape\n        full_attention_mask = torch.ones(batch_size, seq_length, seq_length, device=input_ids.device)\n        full_attention_mask.tril_()\n        past_length = 0\n        if past_key_values:\n            past_length = past_key_values[0][0].shape[2]\n        if past_length:\n            full_attention_mask = torch.cat((torch.ones(batch_size, seq_length, past_length,\n                                                        device=input_ids.device), full_attention_mask), dim=-1)\n        if padding_mask is not None:\n            full_attention_mask = full_attention_mask * padding_mask.unsqueeze(1)\n        if not past_length and padding_mask is not None:\n            full_attention_mask -= 
padding_mask.unsqueeze(-1) - 1\n        full_attention_mask = (full_attention_mask < 0.5).bool()\n        full_attention_mask.unsqueeze_(1)\n        return full_attention_mask\n\n    def get_position_ids(self, input_ids, device):\n        batch_size, seq_length = input_ids.shape\n        position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1)\n        return position_ids\n\n    def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None):\n        if not self.supports_gradient_checkpointing:\n            raise ValueError(f\"{self.__class__.__name__} does not support gradient checkpointing.\")\n\n\nclass Embedding(torch.nn.Module):\n    \"\"\"Language model embeddings.\"\"\"\n\n    def __init__(self, config: ChatGLMConfig, device=None):\n        super(Embedding, self).__init__()\n\n        self.hidden_size = config.hidden_size\n        # Word embeddings (parallel).\n        self.word_embeddings = nn.Embedding(\n            config.padded_vocab_size,\n            self.hidden_size,\n            dtype=config.torch_dtype,\n            device=device\n        )\n        self.fp32_residual_connection = config.fp32_residual_connection\n\n    def forward(self, input_ids):\n        # Embeddings.\n        words_embeddings = self.word_embeddings(input_ids)\n        embeddings = words_embeddings\n        # If the input flag for fp32 residual connection is set, convert for float.\n        if self.fp32_residual_connection:\n            embeddings = embeddings.float()\n        return embeddings\n\n\nclass ChatGLMModel(ChatGLMPreTrainedModel):\n    def __init__(self, config: ChatGLMConfig, device=None, empty_init=True):\n        super().__init__(config)\n        if empty_init:\n            init_method = skip_init\n        else:\n            init_method = default_init\n        init_kwargs = {}\n        if device is not None:\n            init_kwargs[\"device\"] = device\n        self.embedding = init_method(Embedding, config, **init_kwargs)\n        self.num_layers = config.num_layers\n        self.multi_query_group_num = config.multi_query_group_num\n        self.kv_channels = config.kv_channels\n\n        # Rotary positional embeddings\n        self.seq_length = config.seq_length\n        rotary_dim = (\n            config.hidden_size // config.num_attention_heads if config.kv_channels is None else config.kv_channels\n        )\n\n        self.rotary_pos_emb = RotaryEmbedding(rotary_dim // 2, rope_ratio=config.rope_ratio,\n                                              original_impl=config.original_rope,\n                                              device=device, dtype=config.torch_dtype)\n        self.encoder = init_method(GLMTransformer, config, **init_kwargs)\n        self.output_layer = init_method(nn.Linear, config.hidden_size, config.padded_vocab_size, bias=False,\n                                        dtype=config.torch_dtype, **init_kwargs)\n\n    def get_input_embeddings(self):\n        return self.embedding.word_embeddings\n\n    def set_input_embeddings(self, value):\n        self.embedding.word_embeddings = value\n\n    def forward(\n            self,\n            input_ids,\n            position_ids: Optional[torch.Tensor] = None,\n            attention_mask: Optional[torch.BoolTensor] = None,\n            full_attention_mask: Optional[torch.BoolTensor] = None,\n            past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,\n            inputs_embeds: Optional[torch.Tensor] = None,\n            
use_cache: Optional[bool] = None,\n            output_attentions: Optional[bool] = None,\n            output_hidden_states: Optional[bool] = None,\n            return_dict: Optional[bool] = None,\n    ):\n        output_hidden_states = (\n            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        )\n        use_cache = use_cache if use_cache is not None else self.config.use_cache\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n        batch_size, seq_length = input_ids.shape\n\n        if inputs_embeds is None:\n            inputs_embeds = self.embedding(input_ids)\n\n        if full_attention_mask is None:\n            if (attention_mask is not None and not attention_mask.all()) or (past_key_values and seq_length != 1):\n                full_attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask)\n\n        # Rotary positional embeddings\n        rotary_pos_emb = self.rotary_pos_emb(self.seq_length)\n        if position_ids is not None:\n            rotary_pos_emb = rotary_pos_emb[position_ids]\n        else:\n            rotary_pos_emb = rotary_pos_emb[None, :seq_length]\n\n        # Run encoder.\n        hidden_states, presents, all_hidden_states, all_self_attentions = self.encoder(\n            inputs_embeds, full_attention_mask, rotary_pos_emb=rotary_pos_emb,\n            kv_caches=past_key_values, use_cache=use_cache, output_hidden_states=output_hidden_states\n        )\n        if presents is not None and type(presents) is torch.Tensor:\n            presents = presents.split(1, dim=0)\n            presents = list(presents)\n            presents = [list(x.squeeze(0).split(1, dim=0)) for x in presents]\n            presents = [tuple([x.squeeze(0) for x in y]) for y in presents]\n            presents = tuple(presents)\n\n        if not return_dict:\n            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)\n\n        return BaseModelOutputWithPast(\n            last_hidden_state=hidden_states,\n            past_key_values=presents,\n            hidden_states=all_hidden_states,\n            attentions=all_self_attentions,\n        )\n\n\nclass ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):\n    def __init__(self, config: ChatGLMConfig, empty_init=True, device=None):\n        super().__init__(config)\n\n        self.max_sequence_length = config.max_length\n        self.transformer = ChatGLMModel(config, empty_init=empty_init, device=device)\n        self.config = config\n\n    def _update_model_kwargs_for_generation(\n            self,\n            outputs: ModelOutput,\n            model_kwargs: Dict[str, Any],\n            is_encoder_decoder: bool = False,\n            standardize_cache_format: bool = False,\n    ) -> Dict[str, Any]:\n        # update past_key_values\n        model_kwargs[\"past_key_values\"] = self._extract_past_from_model_output(\n            outputs, standardize_cache_format=standardize_cache_format\n        )\n\n        # update attention mask\n        if \"attention_mask\" in model_kwargs:\n            attention_mask = model_kwargs[\"attention_mask\"]\n            model_kwargs[\"attention_mask\"] = torch.cat(\n                [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1\n            )\n\n        # update position ids\n        if \"position_ids\" in model_kwargs:\n            position_ids = model_kwargs[\"position_ids\"]\n 
           new_position_id = position_ids[..., -1:].clone()\n            new_position_id += 1\n            model_kwargs[\"position_ids\"] = torch.cat(\n                [position_ids, new_position_id], dim=-1\n            )\n\n        model_kwargs[\"is_first_forward\"] = False\n        return model_kwargs\n\n    def prepare_inputs_for_generation(\n            self,\n            input_ids: torch.LongTensor,\n            past_key_values: Optional[torch.Tensor] = None,\n            attention_mask: Optional[torch.Tensor] = None,\n            position_ids: Optional[torch.Tensor] = None,\n            use_cache: Optional[bool] = None,\n            is_first_forward: bool = True,\n            **kwargs\n    ) -> dict:\n        # only last token for input_ids if past is not None\n        if position_ids is None:\n            position_ids = self.get_position_ids(input_ids, device=input_ids.device)\n        if not is_first_forward:\n            if past_key_values is not None:\n                position_ids = position_ids[..., -1:]\n                input_ids = input_ids[:, -1:]\n        return {\n            \"input_ids\": input_ids,\n            \"past_key_values\": past_key_values,\n            \"position_ids\": position_ids,\n            \"attention_mask\": attention_mask,\n            \"return_last_logit\": True,\n            \"use_cache\": use_cache\n        }\n\n    def forward(\n            self,\n            input_ids: Optional[torch.Tensor] = None,\n            position_ids: Optional[torch.Tensor] = None,\n            attention_mask: Optional[torch.Tensor] = None,\n            past_key_values: Optional[Tuple[torch.FloatTensor]] = None,\n            inputs_embeds: Optional[torch.Tensor] = None,\n            labels: Optional[torch.Tensor] = None,\n            use_cache: Optional[bool] = None,\n            output_attentions: Optional[bool] = None,\n            output_hidden_states: Optional[bool] = None,\n            return_dict: Optional[bool] = None,\n            return_last_logit: Optional[bool] = False,\n    ):\n        use_cache = use_cache if use_cache is not None else self.config.use_cache\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n        transformer_outputs = self.transformer(\n            input_ids=input_ids,\n            position_ids=position_ids,\n            attention_mask=attention_mask,\n            past_key_values=past_key_values,\n            inputs_embeds=inputs_embeds,\n            use_cache=use_cache,\n            output_hidden_states=output_hidden_states,\n            return_dict=return_dict,\n        )\n\n        hidden_states = transformer_outputs[0]\n        if return_last_logit:\n            hidden_states = hidden_states[:, -1:]\n        lm_logits = self.transformer.output_layer(hidden_states)\n\n        loss = None\n        if labels is not None:\n            lm_logits = lm_logits.to(torch.float32)\n\n            # Shift so that tokens < n predict n\n            shift_logits = lm_logits[..., :-1, :].contiguous()\n            shift_labels = labels[..., 1:].contiguous()\n            # Flatten the tokens\n            loss_fct = CrossEntropyLoss(ignore_index=-100)\n            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))\n\n            lm_logits = lm_logits.to(hidden_states.dtype)\n            loss = loss.to(hidden_states.dtype)\n\n        if not return_dict:\n            output = (lm_logits,) + transformer_outputs[1:]\n            return ((loss,) + output) if loss is not None else 
output\n\n        return CausalLMOutputWithPast(\n            loss=loss,\n            logits=lm_logits,\n            past_key_values=transformer_outputs.past_key_values,\n            hidden_states=transformer_outputs.hidden_states,\n            attentions=transformer_outputs.attentions,\n        )\n\n    @staticmethod\n    def _reorder_cache(\n            past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor\n    ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:\n        \"\"\"\n        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or\n        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct\n        beam_idx at every generation step.\n\n        Output shares the same memory storage as `past`.\n        \"\"\"\n        return tuple(\n            (\n                layer_past[0].index_select(0, beam_idx.to(layer_past[0].device)),\n                layer_past[1].index_select(0, beam_idx.to(layer_past[1].device)),\n            )\n            for layer_past in past\n        )\n\n    def process_response(self, output, history):\n        content = \"\"\n        history = deepcopy(history)\n        for response in output.split(\"<|assistant|>\"):\n            if \"\\n\" in response:\n                metadata, content = response.split(\"\\n\", maxsplit=1)\n            else:\n                metadata, content = \"\", response\n            if not metadata.strip():\n                content = content.strip()\n                history.append({\"role\": \"assistant\", \"metadata\": metadata, \"content\": content})\n                content = content.replace(\"[[训练时间]]\", \"2023年\")\n            else:\n                history.append({\"role\": \"assistant\", \"metadata\": metadata, \"content\": content})\n                if history[0][\"role\"] == \"system\" and \"tools\" in history[0]:\n                    parameters = json.loads(content)\n                    content = {\"name\": metadata.strip(), \"parameters\": parameters}\n                else:\n                    content = {\"name\": metadata.strip(), \"content\": content}\n        return content, history\n\n    @torch.inference_mode()\n    def chat(self, tokenizer, query: str, history: List[Dict] = None, role: str = \"user\",\n             max_length: int = 8192, num_beams=1, do_sample=True, top_p=0.8, temperature=0.8, logits_processor=None,\n             **kwargs):\n        if history is None:\n            history = []\n        if logits_processor is None:\n            logits_processor = LogitsProcessorList()\n        logits_processor.append(InvalidScoreLogitsProcessor())\n        gen_kwargs = {\"max_length\": max_length, \"num_beams\": num_beams, \"do_sample\": do_sample, \"top_p\": top_p,\n                      \"temperature\": temperature, \"logits_processor\": logits_processor, **kwargs}\n        history.append({\"role\": role, \"content\": query})\n        inputs = tokenizer.apply_chat_template(history, add_generation_prompt=True, tokenize=True,\n                                               return_tensors=\"pt\", return_dict=True)\n        inputs = inputs.to(self.device)\n        eos_token_id = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids(\"<|user|>\"),\n                        tokenizer.convert_tokens_to_ids(\"<|observation|>\")]\n        outputs = self.generate(**inputs, **gen_kwargs, eos_token_id=eos_token_id)\n        outputs = outputs.tolist()[0][len(inputs[\"input_ids\"][0]):-1]\n        
response = tokenizer.decode(outputs)\n        response, history = self.process_response(response, history)\n        return response, history\n\n    @torch.inference_mode()\n    def stream_chat(self, tokenizer, query: str, history: List[Dict] = None, role: str = \"user\",\n                    past_key_values=None, max_length: int = 8192, do_sample=True, top_p=0.8, temperature=0.8,\n                    logits_processor=None, return_past_key_values=False, **kwargs):\n        if history is None:\n            history = []\n        if logits_processor is None:\n            logits_processor = LogitsProcessorList()\n        logits_processor.append(InvalidScoreLogitsProcessor())\n        eos_token_id = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids(\"<|user|>\"),\n                        tokenizer.convert_tokens_to_ids(\"<|observation|>\")]\n        gen_kwargs = {\"max_length\": max_length, \"do_sample\": do_sample, \"top_p\": top_p,\n                      \"temperature\": temperature, \"logits_processor\": logits_processor, **kwargs}\n        if past_key_values is None:\n            inputs = tokenizer.apply_chat_template(history + [{\"role\": role, \"content\": query}],\n                                                   add_generation_prompt=True, tokenize=True, return_tensors=\"pt\",\n                                                   return_dict=True)\n        else:\n            inputs = tokenizer.apply_chat_template([{\"role\": role, \"content\": query}], add_special_tokens=False,\n                                                   add_generation_prompt=True, tokenize=True, return_tensors=\"pt\",\n                                                   return_dict=True)\n        inputs = inputs.to(self.device)\n        if past_key_values is not None:\n            past_length = past_key_values[0][0].shape[2]\n            inputs.position_ids += past_length\n            attention_mask = inputs.attention_mask\n            attention_mask = torch.cat((attention_mask.new_ones(1, past_length), attention_mask), dim=1)\n            inputs['attention_mask'] = attention_mask\n        history.append({\"role\": role, \"content\": query})\n        for outputs in self.stream_generate(**inputs, past_key_values=past_key_values,\n                                            eos_token_id=eos_token_id, return_past_key_values=return_past_key_values,\n                                            **gen_kwargs):\n            if return_past_key_values:\n                outputs, past_key_values = outputs\n            outputs = outputs.tolist()[0][len(inputs[\"input_ids\"][0]):-1]\n            response = tokenizer.decode(outputs)\n            if response and response[-1] != \"�\":\n                response, new_history = self.process_response(response, history)\n                if return_past_key_values:\n                    yield response, new_history, past_key_values\n                else:\n                    yield response, new_history\n\n    @torch.inference_mode()\n    def stream_generate(\n            self,\n            input_ids,\n            generation_config: Optional[GenerationConfig] = None,\n            logits_processor: Optional[LogitsProcessorList] = None,\n            stopping_criteria: Optional[StoppingCriteriaList] = None,\n            prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,\n            return_past_key_values=False,\n            **kwargs,\n    ):\n        batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]\n\n        if 
generation_config is None:\n            generation_config = self.generation_config\n        generation_config = copy.deepcopy(generation_config)\n        model_kwargs = generation_config.update(**kwargs)\n        model_kwargs[\"use_cache\"] = generation_config.use_cache\n        bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id\n\n        if isinstance(eos_token_id, int):\n            eos_token_id = [eos_token_id]\n        eos_token_id_tensor = torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None\n\n        has_default_max_length = kwargs.get(\"max_length\") is None and generation_config.max_length is not None\n        if has_default_max_length and generation_config.max_new_tokens is None:\n            warnings.warn(\n                f\"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. \"\n                \"This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we\"\n                \" recommend using `max_new_tokens` to control the maximum length of the generation.\",\n                UserWarning,\n            )\n        elif generation_config.max_new_tokens is not None:\n            generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length\n            if not has_default_max_length:\n                logger.warning(\n                    f\"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length` (=\"\n                    f\"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. \"\n                    \"Please refer to the documentation for more information. \"\n                    \"(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)\"\n                )\n\n        if input_ids_seq_length >= generation_config.max_length:\n            input_ids_string = \"decoder_input_ids\" if self.config.is_encoder_decoder else \"input_ids\"\n            logger.warning(\n                f\"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to\"\n                f\" {generation_config.max_length}. This can lead to unexpected behavior. You should consider\"\n                \" increasing `max_new_tokens`.\"\n            )\n\n        # 2. 
Set generation parameters if not already defined\n        logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()\n        stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()\n\n        logits_processor = self._get_logits_processor(\n            generation_config=generation_config,\n            input_ids_seq_length=input_ids_seq_length,\n            encoder_input_ids=input_ids,\n            prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,\n            logits_processor=logits_processor,\n        )\n\n        stopping_criteria = self._get_stopping_criteria(\n            generation_config=generation_config, stopping_criteria=stopping_criteria\n        )\n        logits_warper = self._get_logits_warper(generation_config)\n\n        unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)\n        scores = None\n        while True:\n            model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)\n            # forward pass to get next token\n            outputs = self(\n                **model_inputs,\n                return_dict=True,\n                output_attentions=False,\n                output_hidden_states=False,\n            )\n\n            next_token_logits = outputs.logits[:, -1, :]\n\n            # pre-process distribution\n            next_token_scores = logits_processor(input_ids, next_token_logits)\n            next_token_scores = logits_warper(input_ids, next_token_scores)\n\n            # sample\n            probs = nn.functional.softmax(next_token_scores, dim=-1)\n            if generation_config.do_sample:\n                next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)\n            else:\n                next_tokens = torch.argmax(probs, dim=-1)\n            # update generated ids, model inputs, and length for next step\n            input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)\n            model_kwargs = self._update_model_kwargs_for_generation(\n                outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder\n            )\n            unfinished_sequences = unfinished_sequences.mul(\n                next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0)\n            )\n            if return_past_key_values:\n                yield input_ids, outputs.past_key_values\n            else:\n                yield input_ids\n            # stop when each sentence is finished, or if we exceed the maximum length\n            if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):\n                break\n\n\nclass ChatGLMForSequenceClassification(ChatGLMPreTrainedModel):\n    def __init__(self, config: ChatGLMConfig, empty_init=True, device=None):\n        super().__init__(config)\n\n        self.num_labels = config.num_labels\n        self.transformer = ChatGLMModel(config, empty_init=empty_init, device=device)\n\n        self.classifier_head = nn.Linear(config.hidden_size, config.num_labels, bias=True, dtype=config.torch_dtype)\n        if config.classifier_dropout is not None:\n            self.dropout = nn.Dropout(config.classifier_dropout)\n        else:\n            self.dropout = None\n        self.config = config\n\n    def forward(\n            self,\n            input_ids: Optional[torch.LongTensor] = None,\n            position_ids: Optional[torch.LongTensor] = None,\n            attention_mask: Optional[torch.Tensor] = None,\n    
        full_attention_mask: Optional[torch.Tensor] = None,\n            past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,\n            inputs_embeds: Optional[torch.LongTensor] = None,\n            labels: Optional[torch.LongTensor] = None,\n            use_cache: Optional[bool] = None,\n            output_attentions: Optional[bool] = None,\n            output_hidden_states: Optional[bool] = None,\n            return_dict: Optional[bool] = None,\n    ) -> Union[Tuple[torch.Tensor, ...], SequenceClassifierOutputWithPast]:\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n        transformer_outputs = self.transformer(\n            input_ids=input_ids,\n            position_ids=position_ids,\n            attention_mask=attention_mask,\n            full_attention_mask=full_attention_mask,\n            past_key_values=past_key_values,\n            inputs_embeds=inputs_embeds,\n            use_cache=use_cache,\n            output_attentions=output_attentions,\n            output_hidden_states=output_hidden_states,\n            return_dict=return_dict,\n        )\n\n        hidden_states = transformer_outputs[0]\n        pooled_hidden_states = hidden_states[:, -1]\n        if self.dropout is not None:\n            pooled_hidden_states = self.dropout(pooled_hidden_states)\n        logits = self.classifier_head(pooled_hidden_states)\n\n        loss = None\n        if labels is not None:\n            if self.config.problem_type is None:\n                if self.num_labels == 1:\n                    self.config.problem_type = \"regression\"\n                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n                    self.config.problem_type = \"single_label_classification\"\n                else:\n                    self.config.problem_type = \"multi_label_classification\"\n\n            if self.config.problem_type == \"regression\":\n                loss_fct = MSELoss()\n                if self.num_labels == 1:\n                    loss = loss_fct(logits.squeeze().float(), labels.squeeze())\n                else:\n                    loss = loss_fct(logits.float(), labels)\n            elif self.config.problem_type == \"single_label_classification\":\n                loss_fct = CrossEntropyLoss()\n                loss = loss_fct(logits.view(-1, self.num_labels).float(), labels.view(-1))\n            elif self.config.problem_type == \"multi_label_classification\":\n                loss_fct = BCEWithLogitsLoss()\n                loss = loss_fct(logits.float(), labels.view(-1, self.num_labels))\n\n        if not return_dict:\n            output = (logits,) + transformer_outputs[1:]\n            return ((loss,) + output) if loss is not None else output\n\n        return SequenceClassifierOutputWithPast(\n            loss=loss,\n            logits=logits,\n            past_key_values=transformer_outputs.past_key_values,\n            hidden_states=transformer_outputs.hidden_states,\n            attentions=transformer_outputs.attentions,\n        )\n"
  },
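The `stream_generate` loop in `glm4/modeling_chatglm.py` above stops per sequence by folding several EOS ids into one multiplicative mask: a row of `unfinished_sequences` drops to 0 the first time its newest token equals any EOS id, and stays 0 afterwards. A minimal, self-contained sketch of just that update, with made-up token ids (nothing here comes from the repo):

```python
# Toy reproduction of the multi-EOS stopping mask used in stream_generate.
import torch

eos_token_id_tensor = torch.tensor([2, 5, 7])   # hypothetical EOS ids
unfinished = torch.ones(4, dtype=torch.long)    # batch of 4, all still running

next_tokens = torch.tensor([9, 5, 9, 2])        # one sampled token per row
# tile -> (num_eos, batch); ne -> 1 where token != eos;
# prod over the eos dim leaves 1 only for rows matching *no* EOS id.
mask = (
    next_tokens.tile(eos_token_id_tensor.shape[0], 1)
    .ne(eos_token_id_tensor.unsqueeze(1))
    .prod(dim=0)
)
unfinished = unfinished.mul(mask)
print(unfinished.tolist())  # [1, 0, 1, 0] -- rows 1 and 3 hit an EOS id
```

Generation then stops once `unfinished.max() == 0`, i.e. every row has emitted some EOS token.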
  {
    "path": "glm4/tokenization_chatglm.py",
    "content": "import regex as re\nimport base64\nimport os\nimport json\nimport tiktoken\nfrom torch import TensorType\nfrom typing import List, Optional, Union, Dict, Any\nfrom transformers import PreTrainedTokenizer\nfrom transformers.utils import logging, PaddingStrategy\nfrom transformers.tokenization_utils_base import EncodedInput, BatchEncoding\n\n\nclass ChatGLM4Tokenizer(PreTrainedTokenizer):\n    vocab_files_names = {\"vocab_file\": \"tokenizer.model\"}\n    model_input_names = [\"input_ids\", \"attention_mask\", \"position_ids\"]\n\n    def __init__(\n            self,\n            vocab_file,\n            padding_side=\"left\",\n            clean_up_tokenization_spaces=False,\n            encode_special_tokens=False,\n            **kwargs\n    ):\n        self.name = \"GLM4Tokenizer\"\n        self.vocab_file = vocab_file\n        pat_str = \"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\\\r\\\\n\\\\p{L}\\\\p{N}]?\\\\p{L}+|\\\\p{N}{1,3}| ?[^\\\\s\\\\p{L}\\\\p{N}]+[\\\\r\\\\n]*|\\\\s*[\\\\r\\\\n]+|\\\\s+(?!\\\\S)|\\\\s+\"\n        self.pat_str = re.compile(pat_str)\n        self.encode_special_tokens = encode_special_tokens\n\n        mergeable_ranks = {}\n        with open(vocab_file) as f:\n            for line in f:\n                token, rank = line.strip().split()\n                rank = int(rank)\n                token = base64.b64decode(token)\n                mergeable_ranks[token] = rank\n\n        self.mergeable_ranks = mergeable_ranks\n\n        self.tokenizer = tiktoken.Encoding(\n            name=\"my_tokenizer\",\n            pat_str=pat_str,\n            mergeable_ranks=mergeable_ranks,\n            special_tokens={}\n        )\n        self.decoder = {rank: token for token, rank in mergeable_ranks.items()}\n        self.n_words = len(self.decoder)\n\n        super().__init__(\n            padding_side=padding_side,\n            clean_up_tokenization_spaces=clean_up_tokenization_spaces,\n            **kwargs\n        )\n\n    @property\n    def vocab_size(self):\n        return self.n_words\n\n    def get_vocab(self):\n        \"\"\" Returns vocab as a dict \"\"\"\n        vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}\n        vocab.update(self.added_tokens_encoder)\n        return vocab\n\n    def convert_tokens_to_string(self, tokens: List[Union[bytes, str, int]]) -> str:\n        \"\"\"\n        Converts a sequence of tokens in a single string.\n        \"\"\"\n        text = \"\"\n        temp = b\"\"\n        for t in tokens:\n            if isinstance(t, int):\n                t = chr(t)\n            if isinstance(t, str):\n                if temp:\n                    text += temp.decode(\"utf-8\", errors=\"replace\")\n            elif isinstance(t, bytes):\n                temp += t\n            else:\n                raise TypeError(\"token should only be of type int, bytes or str\")\n        if temp:\n            text += temp.decode(\"utf-8\", errors=\"replace\")\n        return text\n\n    def _tokenize(self, text, **kwargs):\n        tokens = []\n        ids = self.tokenizer.encode(text)\n        for t in ids:\n            tokens.append(self.decoder[t])\n        return tokens\n\n    def _convert_token_to_id(self, token):\n        \"\"\" Converts a token (str) in an id using the vocab. 
\"\"\"\n        return self.mergeable_ranks[token]\n\n    def _convert_id_to_token(self, index):\n        \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n        return self.decoder.get(index, \"\")\n\n    def save_vocabulary(self, save_directory, filename_prefix=None):\n        \"\"\"\n        Save the vocabulary and special tokens file to a directory.\n\n        Args:\n            save_directory (`str`):\n                The directory in which to save the vocabulary.\n            filename_prefix (`str`, *optional*):\n                An optional prefix to add to the named of the saved files.\n\n        Returns:\n            `Tuple(str)`: Paths to the files saved.\n        \"\"\"\n        if os.path.isdir(save_directory):\n            vocab_file = os.path.join(\n                save_directory, self.vocab_files_names[\"vocab_file\"]\n            )\n        else:\n            vocab_file = save_directory\n\n        with open(self.vocab_file, 'rb') as fin:\n            proto_str = fin.read()\n\n        with open(vocab_file, \"wb\") as writer:\n            writer.write(proto_str)\n\n        return (vocab_file,)\n\n    def get_prefix_tokens(self):\n        prefix_tokens = [self.convert_tokens_to_ids(\"[gMASK]\"), self.convert_tokens_to_ids(\"<sop>\")]\n        return prefix_tokens\n\n    def build_single_message(self, role, metadata, message, tokenize=True):\n        assert role in [\"system\", \"user\", \"assistant\", \"observation\"], role\n        if tokenize:\n            role_tokens = [self.convert_tokens_to_ids(f\"<|{role}|>\")] + self.tokenizer.encode(f\"{metadata}\\n\",\n                                                                                              disallowed_special=())\n            message_tokens = self.tokenizer.encode(message, disallowed_special=())\n            tokens = role_tokens + message_tokens\n            return tokens\n        else:\n            return str(f\"<|{role}|>{metadata}\\n{message}\")\n\n    # Use Jinja Template in tokenizer_config.json\n    # def apply_chat_template(\n    #         self,\n    #         conversation: Union[List[Dict[str, str]], List[List[Dict[str, str]]], \"Conversation\"],\n    #         add_generation_prompt: bool = False,\n    #         tokenize: bool = True,\n    #         padding: bool = False,\n    #         truncation: bool = False,\n    #         max_length: Optional[int] = None,\n    #         return_tensors: Optional[Union[str, TensorType]] = None,\n    #         return_dict: bool = False,\n    #         tokenizer_kwargs: Optional[Dict[str, Any]] = None,\n    #         add_special_tokens: bool = True,\n    #         **kwargs,\n    # ) -> Union[str, List[int], List[str], List[List[int]], BatchEncoding]:\n    #\n    #     if return_dict and not tokenize:\n    #         raise ValueError(\n    #             \"`return_dict=True` is incompatible with `tokenize=False`, because there is no dict \"\n    #             \"of tokenizer outputs to return.\"\n    #         )\n    #\n    #     def handle_single_conversation(conversation):\n    #         input_ids = self.get_prefix_tokens() if add_special_tokens else []\n    #         input_message = \"[gMASK]<sop>\" if add_special_tokens else \"\"\n    #         for item in conversation:\n    #             if item.get(\"tools\"):\n    #                 tools = item[\"tools\"]\n    #                 content = \"你是一个名为 GhatGLM 的人工智能助手。你是基于智谱AI训练的语言模型 GLM-4 模型开发的，你的任务是针对用户的问题和要求提供适当的答复和支持。\"\n    #                 content += \"\\n\\n# 可用工具\"\n    #                 
for tool in tools:\n    #                     if tool[\"type\"] == \"function\":\n    #                         function = tool[\"function\"]\n    #                         content += f\"\\n\\n## {function['name']}\\n\\n{json.dumps(function, ensure_ascii=False, indent=4)}\"\n    #                         content += \"\\n在调用上述函数时，请使用 Json 格式表示调用的参数。\"\n    #                     elif tool[\"type\"] == \"python\":\n    #                         content += \"\\n\\n## python\\n\\n当你向 `python` 发送包含 Python 代码的消息时，该代码将会在一个有状态的 Jupyter notebook 环境中执行。\\n`python` 返回代码执行的输出，或在执行 60 秒后返回超时。\\n`/mnt/data` 将会持久化存储你的文件。在此会话中，`python` 无法访问互联网。不要使用 `python` 进行任何网络请求或者在线 API 调用，这些在线内容的访问将不会成功。\"\n    #                     elif tool[\"type\"] == \"simple_browser\":\n    #                         content += \"\\n\\n## simple_browser\\n\\n你可以使用 `simple_browser` 工具。该工具支持以下函数：\\n`search(query: str, recency_days: int)`：使用搜索引擎进行查询并显示结果，可以使用 `recency_days` 参数控制搜索内容的时效性。\\n`mclick(ids: list[int])`：获取一系列指定 id 的页面内容。每次调用时，须选择3-10个页面。选择多个角度的页面，同时尽可能选择可信任的信息来源。考虑到部分页面是无法加载的，你也可以多打开一些可能有用的页面而不用担心内容过多。\\n`open_url(url: str)`：打开指定的 URL。\\n\\n使用 `【{引用 id}†{引用文本}】` 来引用内容。\\n\\n操作步骤：1. 使用 `search` 来获得信息列表; 2. 使用 `mclick` 来获取指定 ID 页面的内容; 3. 根据获得的内容进行回复。在回复中应当引用信息来源。\\n 如果用户提供了 URL，也可以用 `open_url` 直接打开页面。\\n如果初次搜索结果没有找到合适的信息，也可以再次使用 `search` 进行搜索。\"\n    #                     elif tool[\"type\"] == \"cogview\":\n    #                         content += \"\\n\\n## cogview\\n\\n如果用户的请求中包含了对图像的描述，你可以使用 `cogview` 来生成图像并展示给用户。你需要向 `cogview` 发送图像描述，规则：\\n- 发送给 `cogview` 的消息必须使用英语。用户的中文描述必须完全翻译为英语。\\n- 应当尽可能详细地描述图像生成的需求，需求描述约 100 英文单词。\\n- 保持用户原始描述的意图。不要虚构内容或者没见过的人物。\\n- 如无特殊说明，所在地为中国，持有中国立场并遵循中国社会主义价值观。\"\n    #                     else:\n    #                         raise NotImplementedError(f\"Unknown tool type {tool['type']}\")\n    #                 input = self.build_single_message(\"system\", \"\", content, tokenize=tokenize)\n    #                 if tokenize:\n    #                     input_ids.extend(input)\n    #                 else:\n    #                     input_message += input\n    #             if item[\"content\"]:\n    #                 input = self.build_single_message(\n    #                     item[\"role\"],\n    #                     item.get(\"metadata\", \"\"),\n    #                     item[\"content\"],\n    #                     tokenize=tokenize\n    #                 )\n    #                 if tokenize:\n    #                     input_ids.extend(input)\n    #                 else:\n    #                     input_message += input\n    #         if add_generation_prompt:\n    #             if tokenize:\n    #                 input_ids.extend([self.convert_tokens_to_ids(\"<|assistant|>\")])\n    #             else:\n    #                 input_message += \"<|assistant|>\"\n    #         return input_ids if tokenize else input_message\n    #\n    #     # Main logic to handle different conversation formats\n    #     if isinstance(conversation, list) and all(isinstance(i, dict) for i in conversation):\n    #         result = handle_single_conversation(conversation)\n    #     elif isinstance(conversation, list) and all(isinstance(i, list) for i in conversation):\n    #         result = [handle_single_conversation(c) for c in conversation]\n    #     elif hasattr(conversation, \"messages\"):\n    #         result = handle_single_conversation(conversation.messages)\n    #     else:\n    #         raise ValueError(\"Invalid conversation format\")\n    #\n    #     if tokenize:\n    #         output = 
self.batch_encode_plus(\n    #             [result] if isinstance(result[0], int) else result,\n    #             padding=padding,\n    #             truncation=truncation,\n    #             max_length=max_length,\n    #             return_tensors=return_tensors,\n    #             is_split_into_words=True,\n    #             add_special_tokens=False\n    #         )\n    #         if return_dict:\n    #             return output\n    #         else:\n    #             return output[\"input_ids\"]\n    #     else:\n    #         return result\n\n    def build_inputs_with_special_tokens(\n            self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None\n    ) -> List[int]:\n        \"\"\"\n        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\n        adding special tokens. A BERT sequence has the following format:\n\n        - single sequence: `[CLS] X [SEP]`\n        - pair of sequences: `[CLS] A [SEP] B [SEP]`\n\n        Args:\n            token_ids_0 (`List[int]`):\n                List of IDs to which the special tokens will be added.\n            token_ids_1 (`List[int]`, *optional*):\n                Optional second list of IDs for sequence pairs.\n\n        Returns:\n            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.\n        \"\"\"\n        prefix_tokens = self.get_prefix_tokens()\n        token_ids_0 = prefix_tokens + token_ids_0\n        if token_ids_1 is not None:\n            token_ids_0 = token_ids_0 + token_ids_1 + [self.convert_tokens_to_ids(\"<eos>\")]\n        return token_ids_0\n\n    def _pad(\n            self,\n            encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],\n            max_length: Optional[int] = None,\n            padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,\n            pad_to_multiple_of: Optional[int] = None,\n            return_attention_mask: Optional[bool] = None,\n    ) -> dict:\n        \"\"\"\n        Pad encoded inputs (on left/right and up to predefined length or max length in the batch)\n\n        Args:\n            encoded_inputs:\n                Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).\n            max_length: maximum length of the returned list and optionally padding length (see below).\n                Will truncate by taking into account the special tokens.\n            padding_strategy: PaddingStrategy to use for padding.\n\n                - PaddingStrategy.LONGEST Pad to the longest sequence in the batch\n                - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)\n                - PaddingStrategy.DO_NOT_PAD: Do not pad\n                The tokenizer padding sides are defined in self.padding_side:\n\n                    - 'left': pads on the left of the sequences\n                    - 'right': pads on the right of the sequences\n            pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.\n                This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability\n                `>= 7.5` (Volta).\n            return_attention_mask:\n                (optional) Set to False to avoid returning attention mask (default: set to model specifics)\n        \"\"\"\n        # Load from model defaults\n        assert self.padding_side == \"left\"\n\n        required_input = 
encoded_inputs[self.model_input_names[0]]\n        seq_length = len(required_input)\n\n        if padding_strategy == PaddingStrategy.LONGEST:\n            max_length = len(required_input)\n\n        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):\n            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of\n\n        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length\n\n        # Initialize attention mask if not present.\n        if \"attention_mask\" not in encoded_inputs:\n            encoded_inputs[\"attention_mask\"] = [1] * seq_length\n\n        if \"position_ids\" not in encoded_inputs:\n            encoded_inputs[\"position_ids\"] = list(range(seq_length))\n\n        if needs_to_be_padded:\n            difference = max_length - len(required_input)\n\n            if \"attention_mask\" in encoded_inputs:\n                encoded_inputs[\"attention_mask\"] = [0] * difference + encoded_inputs[\"attention_mask\"]\n            if \"position_ids\" in encoded_inputs:\n                encoded_inputs[\"position_ids\"] = [0] * difference + encoded_inputs[\"position_ids\"]\n            encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input\n\n        return encoded_inputs\n"
  },
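`ChatGLM4Tokenizer._pad` above asserts `padding_side == "left"` and pads `input_ids`, `attention_mask`, and `position_ids` on the left, which keeps the real tokens adjacent to the generation position. A standalone sketch of that scheme using plain lists and a hypothetical `pad_token_id` of 0:

```python
# Left-padding as in ChatGLM4Tokenizer._pad, reduced to plain lists.
def pad_left(input_ids, max_length, pad_token_id=0):
    seq_length = len(input_ids)
    attention_mask = [1] * seq_length       # real tokens are attended to
    position_ids = list(range(seq_length))  # positions count real tokens only
    difference = max_length - seq_length
    return {
        "input_ids": [pad_token_id] * difference + input_ids,
        "attention_mask": [0] * difference + attention_mask,
        "position_ids": [0] * difference + position_ids,
    }

print(pad_left([11, 12, 13], max_length=6))
# {'input_ids': [0, 0, 0, 11, 12, 13],
#  'attention_mask': [0, 0, 0, 1, 1, 1],
#  'position_ids': [0, 0, 0, 0, 1, 2]}
```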
  {
    "path": "gptq/README.md",
    "content": "# GPTQ-for-Bloom & LLaMa\n8 bits quantization of [Bloom](https://arxiv.org/pdf/2211.05100.pdf) using [GPTQ](https://arxiv.org/abs/2210.17323)\n\nGPTQ is SOTA one-shot weight quantization method\n\n**This code is based on [GPTQ-for-LLaMa](https://github.com/qwopqwop200/GPTQ-for-LLaMa)**\n\n## [Huggingface models](https://huggingface.co/BelleGroup/BELLE-7B-gptq) \n\n\n| model name       |  file size | GPU memory usage |\n| -------------------------------------------------- |  ------------------- | ------------------ |\n|           base                 |          27G        |       ~28.2G         |\n|           bloom7b-2m-8bit-128g.pt                  |          9.7G        |       ~11.4G          |\n|           bloom7b-2m-4bit-128g.pt                  |          6.9G        |        ~8.4G          |\n|           bloom7b-0.2m-8bit-128g.pt                  |          9.7G        |       ~11.4G          |\n|           bloom7b-0.2m-4bit-128g.pt                  |          6.9G        |        ~8.4G          |\n\n\nAll experiments were run on a single NVIDIA A100.\n\n## Installation\nIf you don't have [conda](https://docs.conda.io/en/latest/miniconda.html), install it first.\n```\nconda create --name gptq python=3.9 -y\nconda activate gptq\nconda install pytorch torchvision torchaudio pytorch-cuda=11.7 -c pytorch -c nvidia\n# Or, if you're having trouble with conda, use pip with python3.9:\n# pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117\n\npip install -r requirements.txt\npython setup_cuda.py install\n\n# Benchmark performance for FC2 layer of LLaMa-7B\nCUDA_VISIBLE_DEVICES=0 python test_kernel.py\n```\n## Dependencies\n\n* `torch`: tested on v2.0.0+cu117\n* `transformers`: tested on v4.28.0.dev0\n* `datasets`: tested on v2.10.1\n* `safetensors`: tested on v0.3.0\n* (to run 4-bit kernels: setup for compiling PyTorch CUDA extensions, see also https://pytorch.org/tutorials/advanced/cpp_extension.html, tested on CUDA 11.7)\n\n\n## Model inference with the saved model\n```\n# BELLE-7B-gptq: local saved model path from Huggingface\ngit lfs install\ngit clone https://huggingface.co/BelleGroup/BELLE-7B-gptq\n# model inference with the saved model\nCUDA_VISIBLE_DEVICES=0 python bloom_inference.py BELLE-7B-gptq --wbits 8 --groupsize 128 --load BELLE-7B-gptq/bloom7b-2m-8bit-128g.pt --text \"hello\"\n```\n\n## Model quantization\n\n```\n# BELLE-7B-gptq: local saved model path\n# Save compressed model\nCUDA_VISIBLE_DEVICES=0 python bloom.py BelleGroup/BELLE-7B-2M wikitext2 --wbits 8 --groupsize 128 --save BELLE-7B-gptq/bloom7b-2m-8bit-128g.pt\n\n```\nCUDA Kernels support 2,3,4,8 bits.\n\nBasically, 8-bit quantization and 128 groupsize are recommended.\n\n# Acknowledgements\nThis code is based on [GPTQ-for-LLaMa](https://github.com/qwopqwop200/GPTQ-for-LLaMa)\n\nThanks to [Bloom](https://arxiv.org/pdf/2211.05100.pdf), a powerful LLM.\n"
  },
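The README above recommends 8-bit quantization with groupsize 128. Underneath, `gptq/quant.py` uses a plain affine scheme: quantize with `q = clamp(round(x / scale) + zero, 0, maxq)`, dequantize with `scale * (q - zero)`. A small round-trip illustration with made-up values (only `quantize` mirrors the repo; the scale/zero choice here is the simple min-max rule):

```python
import torch

def quantize(x, scale, zero, maxq):
    # Same affine round trip as gptq/quant.py: quantize, then dequantize.
    q = torch.clamp(torch.round(x / scale) + zero, 0, maxq)
    return scale * (q - zero)

bits = 8
maxq = torch.tensor(2 ** bits - 1)             # 255 levels for 8 bits
x = torch.tensor([-1.0, -0.3, 0.0, 0.7, 1.5])
scale = (x.max() - x.min()) / maxq             # min-max (asymmetric) range
zero = torch.round(-x.min() / scale)
x_hat = quantize(x, scale, zero, maxq)
print((x - x_hat).abs().max().item())          # worst-case error <= scale / 2
```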
  {
    "path": "gptq/gptq.py",
    "content": "import math\nimport time\n\nimport torch\nimport torch.nn as nn\nimport transformers\n\nfrom gptq.quant import *\n\n\nDEBUG = False \n\ntorch.backends.cuda.matmul.allow_tf32 = False\ntorch.backends.cudnn.allow_tf32 = False\n\n\nclass GPTQ:\n    def __init__(self, layer):\n        self.layer = layer\n        self.dev = self.layer.weight.device\n        W = layer.weight.data.clone()\n        if isinstance(self.layer, nn.Conv2d):\n            W = W.flatten(1)\n        if isinstance(self.layer, transformers.Conv1D):\n            W = W.t()\n        self.rows = W.shape[0]\n        self.columns = W.shape[1]\n        self.H = torch.zeros((self.columns, self.columns), device=self.dev)\n        self.nsamples = 0\n\n    def add_batch(self, inp, out):\n        if DEBUG:\n            self.inp1 = inp\n            self.out1 = out\n        if len(inp.shape) == 2:\n            inp = inp.unsqueeze(0)\n        tmp = inp.shape[0]\n        if isinstance(self.layer, nn.Linear) or isinstance(self.layer, transformers.Conv1D):\n            if len(inp.shape) == 3:\n                inp = inp.reshape((-1, inp.shape[-1]))\n            inp = inp.t()\n        if isinstance(self.layer, nn.Conv2d):\n            unfold = nn.Unfold(\n                self.layer.kernel_size,\n                dilation=self.layer.dilation,\n                padding=self.layer.padding,\n                stride=self.layer.stride\n            )\n            inp = unfold(inp)\n            inp = inp.permute([1, 0, 2])\n            inp = inp.flatten(1)\n        self.H *= self.nsamples / (self.nsamples + tmp)\n        self.nsamples += tmp\n        # inp = inp.float()\n        inp = math.sqrt(2 / self.nsamples) * inp.float()\n        # self.H += 2 / self.nsamples * inp.matmul(inp.t())\n        self.H += inp.matmul(inp.t())\n\n    def fasterquant(\n        self, blocksize=128, percdamp=.01, groupsize=-1\n    ):\n        W = self.layer.weight.data.clone()\n        if isinstance(self.layer, nn.Conv2d):\n            W = W.flatten(1)\n        if isinstance(self.layer, transformers.Conv1D):\n            W = W.t()\n        W = W.float()\n\n        tick = time.time()\n\n        if not self.quantizer.ready():\n            self.quantizer.find_params(W, weight=True)\n\n        H = self.H\n        del self.H\n        dead = torch.diag(H) == 0\n        H[dead, dead] = 1\n        W[:, dead] = 0\n\n        Losses = torch.zeros_like(W)\n        Q = torch.zeros_like(W)\n\n        damp = percdamp * torch.mean(torch.diag(H))\n        diag = torch.arange(self.columns, device=self.dev)\n        H[diag, diag] += damp\n        H = torch.linalg.cholesky(H)\n        H = torch.cholesky_inverse(H)\n        H = torch.linalg.cholesky(H, upper=True)\n        Hinv = H\n        \n        scale = []\n        zero = []\n        now_idx = 1\n\n        for i1 in range(0, self.columns, blocksize):\n            i2 = min(i1 + blocksize, self.columns)\n            count = i2 - i1\n\n            W1 = W[:, i1:i2].clone()\n            Q1 = torch.zeros_like(W1)\n            Err1 = torch.zeros_like(W1)\n            Losses1 = torch.zeros_like(W1)\n            Hinv1 = Hinv[i1:i2, i1:i2]\n\n            for i in range(count):\n                w = W1[:, i]\n                d = Hinv1[i, i]\n\n                if groupsize != -1:\n                    if (i1 + i) % groupsize == 0:\n                        self.quantizer.find_params(W[:, (i1 + i):(i1 + i + groupsize)], weight=True)\n                    \n                    if ((i1 + i) // groupsize) - now_idx == -1:\n                        
scale.append(self.quantizer.scale)\n                        zero.append(self.quantizer.zero)\n                        now_idx += 1\n\n                q = quantize(\n                    w.unsqueeze(1), self.quantizer.scale, self.quantizer.zero, self.quantizer.maxq\n                ).flatten()\n                Q1[:, i] = q\n                Losses1[:, i] = (w - q) ** 2 / d ** 2\n\n                err1 = (w - q) / d\n                W1[:, i:] -= err1.unsqueeze(1).matmul(Hinv1[i, i:].unsqueeze(0))\n                Err1[:, i] = err1\n\n            Q[:, i1:i2] = Q1\n            Losses[:, i1:i2] = Losses1 / 2\n\n            W[:, i2:] -= Err1.matmul(Hinv[i1:i2, i2:])\n\n            if DEBUG:\n                self.layer.weight.data[:, :i2] = Q[:, :i2]\n                self.layer.weight.data[:, i2:] = W[:, i2:]\n                print(torch.sum((self.layer(self.inp1) - self.out1) ** 2))\n                print(torch.sum(Losses))\n\n        torch.cuda.synchronize()\n        print('time %.2f' % (time.time() - tick))\n        print('error', torch.sum(Losses).item())\n\n        if isinstance(self.layer, transformers.Conv1D):\n            Q = Q.t()\n        self.layer.weight.data = Q.reshape(self.layer.weight.shape).to(self.layer.weight.data.dtype)\n        if DEBUG:\n            print(torch.sum((self.layer(self.inp1) - self.out1) ** 2))\n            \n        if scale == []:\n            scale.append(self.quantizer.scale)\n            zero.append(self.quantizer.zero)\n        scale = torch.cat(scale,dim=1)\n        zero = torch.cat(zero,dim=1)\n        return scale,zero\n            \n    def free(self):\n        if DEBUG:\n            self.inp1 = None\n            self.out1 = None\n        self.H = None\n        self.Losses = None\n        self.Trace = None\n        torch.cuda.empty_cache()\n"
  },
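`GPTQ.add_batch` in the file above maintains the Hessian proxy H = (2/N) · X Xᵀ incrementally: each call shrinks the running H by n/(n+m) and adds samples pre-scaled by sqrt(2/(n+m)), so no calibration batch has to be kept in memory. A simplified sketch (counting columns rather than batches, purely illustrative) showing the streaming update matches the one-shot product:

```python
import torch

torch.manual_seed(0)
cols = 8
X = torch.randn(cols, 32)                    # 32 calibration columns

H, n = torch.zeros(cols, cols), 0
for chunk in X.split(8, dim=1):              # stream in 4 chunks of 8
    m = chunk.shape[1]
    H *= n / (n + m)                         # shrink the old contribution
    n += m
    inp = (2 / n) ** 0.5 * chunk             # pre-scale by sqrt(2 / n)
    H += inp @ inp.t()

H_ref = 2 / X.shape[1] * (X @ X.t())         # one-shot reference
print(torch.allclose(H, H_ref, atol=1e-5))   # True
```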
  {
    "path": "gptq/llama.py",
    "content": "import time\n\nimport torch\nimport torch.nn as nn\n\nfrom gptq.gptq import *\nfrom gptq.modelutils import *\nfrom gptq.quant import *\n\n\ndef get_llama(model):\n    import torch\n    def skip(*args, **kwargs):\n        pass\n    torch.nn.init.kaiming_uniform_ = skip\n    torch.nn.init.uniform_ = skip\n    torch.nn.init.normal_ = skip\n    from transformers import LlamaForCausalLM\n    model = LlamaForCausalLM.from_pretrained(model, torch_dtype='auto')\n    model.seqlen = 2048\n    return model\n\n@torch.no_grad()\ndef llama_sequential(model, dataloader, dev):\n    print('Starting ...')\n\n    use_cache = model.config.use_cache\n    model.config.use_cache = False\n    layers = model.model.layers\n\n    model.model.embed_tokens = model.model.embed_tokens.to(dev)\n    model.model.norm = model.model.norm.to(dev)\n    layers[0] = layers[0].to(dev)\n\n    dtype = next(iter(model.parameters())).dtype\n    inps = torch.zeros(\n        (args.nsamples, model.seqlen, model.config.hidden_size), dtype=dtype, device=dev\n    )\n    cache = {'i': 0, 'attention_mask': None}\n\n    class Catcher(nn.Module):\n        def __init__(self, module):\n            super().__init__()\n            self.module = module\n        def forward(self, inp, **kwargs):\n            inps[cache['i']] = inp\n            cache['i'] += 1\n            cache['attention_mask'] = kwargs['attention_mask']\n            raise ValueError\n    layers[0] = Catcher(layers[0])\n    for batch in dataloader:\n        try:\n            model(batch[0].to(dev))\n        except ValueError:\n            pass\n    layers[0] = layers[0].module\n\n    layers[0] = layers[0].cpu()\n    model.model.embed_tokens = model.model.embed_tokens.cpu()\n    model.model.norm = model.model.norm.cpu()\n    torch.cuda.empty_cache()\n\n    outs = torch.zeros_like(inps)\n    attention_mask = cache['attention_mask']\n\n    print('Ready.')\n\n    quantizers = {}\n    for i in range(len(layers)):\n        layer = layers[i].to(dev)\n        full = find_layers(layer)\n        if args.true_sequential:\n            sequential = [\n                ['self_attn.k_proj', 'self_attn.v_proj', 'self_attn.q_proj'],\n                ['self_attn.o_proj'],\n                ['mlp.up_proj', 'mlp.gate_proj'],\n                ['mlp.down_proj']\n            ]\n        else:\n            sequential = [list(full.keys())]\n       \n        for names in sequential:\n            subset = {n: full[n] for n in names}\n            gptq = {}\n            for name in subset:\n                gptq[name] = GPTQ(subset[name])\n                gptq[name].quantizer = Quantizer()\n                gptq[name].quantizer.configure(\n                    args.wbits, perchannel=True, sym=args.sym, mse=False\n                )\n                \n            def add_batch(name):\n                def tmp(_, inp, out):\n                    gptq[name].add_batch(inp[0].data, out.data)\n                return tmp\n            handles = []\n            for name in subset:\n                handles.append(subset[name].register_forward_hook(add_batch(name)))\n            for j in range(args.nsamples):\n                outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask)[0]\n            for h in handles:\n                h.remove()\n\n            for name in subset:\n                print(i, name)\n                print('Quantizing ...')\n                scale,zero = gptq[name].fasterquant(percdamp=args.percdamp, groupsize=args.groupsize)\n                quantizers['model.layers.%d.%s' % 
(i, name)] = (gptq[name].quantizer,scale,zero)\n                gptq[name].free()\n                \n        for j in range(args.nsamples):\n            outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask)[0]\n\n        layers[i] = layer.cpu()\n        del layer\n        del gptq \n        torch.cuda.empty_cache()\n\n        inps, outs = outs, inps\n\n    model.config.use_cache = use_cache\n    \n    return quantizers\n\n@torch.no_grad()\ndef llama_eval(model, testenc, dev):\n    print('Evaluating ...')\n\n    testenc = testenc.input_ids\n    nsamples = testenc.numel() // model.seqlen\n\n    use_cache = model.config.use_cache\n    model.config.use_cache = False\n    layers = model.model.layers\n\n    model.model.embed_tokens = model.model.embed_tokens.to(dev)\n    layers[0] = layers[0].to(dev)\n\n    dtype = next(iter(model.parameters())).dtype\n    inps = torch.zeros(\n        (nsamples, model.seqlen, model.config.hidden_size), dtype=dtype, device=dev\n    )\n    cache = {'i': 0, 'attention_mask': None}\n\n    class Catcher(nn.Module):\n        def __init__(self, module):\n            super().__init__()\n            self.module = module\n        def forward(self, inp, **kwargs):\n            inps[cache['i']] = inp\n            cache['i'] += 1\n            cache['attention_mask'] = kwargs['attention_mask']\n            raise ValueError\n    layers[0] = Catcher(layers[0])\n    for i in range(nsamples):\n        batch = testenc[:, (i * model.seqlen):((i + 1) * model.seqlen)].to(dev)\n        try:\n            model(batch)\n        except ValueError:\n            pass\n    layers[0] = layers[0].module\n\n    layers[0] = layers[0].cpu()\n    model.model.embed_tokens = model.model.embed_tokens.cpu()\n    torch.cuda.empty_cache()\n\n    outs = torch.zeros_like(inps)\n    attention_mask = cache['attention_mask']\n\n    for i in range(len(layers)):\n        print(i)\n        layer = layers[i].to(dev)\n        \n        if args.nearest:\n            subset = find_layers(layer)\n            for name in subset:\n                quantizer = Quantizer()\n                quantizer.configure(\n                    args.wbits, perchannel=True, sym=False, mse=False\n                )\n                W = subset[name].weight.data\n                quantizer.find_params(W, weight=True)\n                subset[name].weight.data = quantize(\n                    W, quantizer.scale, quantizer.zero, quantizer.maxq\n                ).to(next(iter(layer.parameters())).dtype)\n\n        for j in range(nsamples):\n            outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask)[0]\n        layers[i] = layer.cpu()\n        del layer\n        torch.cuda.empty_cache()\n        inps, outs = outs, inps\n\n    if model.model.norm is not None:\n        model.model.norm = model.model.norm.to(dev)\n    model.lm_head = model.lm_head.to(dev)\n\n    testenc = testenc.to(dev)\n    nlls = []\n    for i in range(nsamples):\n        hidden_states = inps[i].unsqueeze(0)\n        if model.model.norm is not None:\n            hidden_states = model.model.norm(hidden_states)\n        lm_logits = model.lm_head(hidden_states)\n        shift_logits = lm_logits[:, :-1, :].contiguous()\n        shift_labels = testenc[\n            :, (i * model.seqlen):((i + 1) * model.seqlen)\n        ][:, 1:]\n        loss_fct = nn.CrossEntropyLoss()\n        loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))\n        neg_log_likelihood = loss.float() * model.seqlen\n        
nlls.append(neg_log_likelihood)\n    ppl = torch.exp(torch.stack(nlls).sum() / (nsamples * model.seqlen))\n    print(ppl.item())\n\n    model.config.use_cache = use_cache\n\n# TODO: perform packing on GPU\ndef llama_pack(model, quantizers, wbits, groupsize):\n    layers = find_layers(model)\n    layers = {n: layers[n] for n in quantizers}\n    make_quant(model, quantizers, wbits, groupsize)\n    qlayers = find_layers(model, [QuantLinear])\n    print('Packing ...')\n    for name in qlayers:\n        print(name)\n        quantizers[name], scale, zero = quantizers[name]\n        quantizers[name], scale, zero = quantizers[name].cpu(), scale.cpu(), zero.cpu()\n        qlayers[name].pack(layers[name], scale, zero)\n    print('Done.')\n    return model\n\ndef load_quant(model, checkpoint, wbits, groupsize=-1, faster_kernel=False):\n    from transformers import LlamaConfig, LlamaForCausalLM\n    config = LlamaConfig.from_pretrained(model)\n    def noop(*args, **kwargs):\n        pass\n    torch.nn.init.kaiming_uniform_ = noop\n    torch.nn.init.uniform_ = noop\n    torch.nn.init.normal_ = noop\n\n    torch.set_default_dtype(torch.half)\n    transformers.modeling_utils._init_weights = False\n    model = LlamaForCausalLM(config)\n    torch.set_default_dtype(torch.float)\n    model = model.eval()\n    layers = find_layers(model)\n    for name in ['lm_head']:\n        if name in layers:\n            del layers[name]\n    # make_quant in gptq/quant.py takes no kernel-selection argument, so\n    # faster_kernel is accepted for CLI compatibility but not forwarded.\n    make_quant(model, layers, wbits, groupsize)\n\n    del layers\n\n    print('Loading model ...')\n    if checkpoint.endswith('.safetensors'):\n        from safetensors.torch import load_file as safe_load\n        model.load_state_dict(safe_load(checkpoint))\n    else:\n        model.load_state_dict(torch.load(checkpoint))\n    model.seqlen = 2048\n    print('Done.')\n\n    return model\n\ndef llama_multigpu(model, gpus):\n    model.model.embed_tokens = model.model.embed_tokens.to(gpus[0])\n    if hasattr(model.model, 'norm') and model.model.norm:\n        model.model.norm = model.model.norm.to(gpus[-1])\n    import copy\n    model.lm_head = copy.deepcopy(model.lm_head).to(gpus[-1])\n\n    cache = {'mask': None}\n\n    class MoveModule(nn.Module):\n        def __init__(self, module):\n            super().__init__()\n            self.module = module\n            self.dev = next(iter(self.module.parameters())).device\n        def forward(self, *inp, **kwargs):\n            inp = list(inp)\n            if inp[0].device != self.dev:\n                inp[0] = inp[0].to(self.dev)\n            if cache['mask'] is None or cache['mask'].device != self.dev:\n                cache['mask'] = kwargs['attention_mask'].to(self.dev)\n            kwargs['attention_mask'] = cache['mask']\n            tmp = self.module(*inp, **kwargs)\n            return tmp\n\n    layers = model.model.layers\n    pergpu = math.ceil(len(layers) / len(gpus))\n    for i in range(len(layers)):\n        layers[i] = MoveModule(layers[i].to(gpus[i // pergpu]))\n\n    model.gpus = gpus\n\ndef benchmark(model, input_ids, check=False):\n    input_ids = input_ids.to(model.gpus[0] if hasattr(model, 'gpus') else DEV)\n    torch.cuda.synchronize()\n\n    cache = {'past': None}\n    def clear_past(i):\n        def tmp(layer, inp, out):\n            if cache['past']:\n                cache['past'][i] = None\n        return tmp\n    for i, layer in enumerate(model.model.layers):\n        layer.register_forward_hook(clear_past(i))\n\n    print('Benchmarking ...')\n\n    if 
check:\n        loss = nn.CrossEntropyLoss()\n        tot = 0.\n\n    def sync():\n        if hasattr(model, 'gpus'):\n            for gpu in model.gpus:\n                torch.cuda.synchronize(gpu)\n        else:\n            torch.cuda.synchronize()\n    max_memory = 0\n    with torch.no_grad():\n        attention_mask = torch.ones((1, input_ids.numel()), device=DEV)\n        times = []\n        for i in range(input_ids.numel()):\n            tick = time.time()\n            out = model(\n                input_ids[:, i:i+1],\n                past_key_values=cache['past'],\n                attention_mask=attention_mask[:, :(i + 1)].reshape((1, -1))\n            )\n            sync()\n            times.append(time.time() - tick)\n            print(i, times[-1])\n            max_memory = max(max_memory,torch.cuda.memory_allocated() / 1024 /1024)\n            if check and i != input_ids.numel() - 1:\n                tot += loss(out.logits[0].to(DEV), input_ids[:, (i + 1)].to(DEV)).float()\n            cache['past'] = list(out.past_key_values)\n            del out\n        sync()\n        import numpy as np\n        print('Median:', np.median(times))\n        if check:\n            print('PPL:', torch.exp(tot / (input_ids.numel() - 1)).item())\n            print('max memory(MiB):',max_memory)\n\n\nif __name__ == '__main__':\n    import argparse\n    from datautils import *\n\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument(\n        'model', type=str,\n        help='llama model to load'\n    )\n    parser.add_argument(\n        'dataset', type=str, choices=['wikitext2', 'ptb', 'c4'],\n        help='Where to extract calibration data from.'\n    )\n    parser.add_argument(\n        '--seed',\n        type=int, default=0, help='Seed for sampling the calibration data.'\n    )\n    parser.add_argument(\n        '--nsamples', type=int, default=128,\n        help='Number of calibration data samples.'\n    )\n    parser.add_argument(\n        '--percdamp', type=float, default=.01,\n        help='Percent of the average Hessian diagonal to use for dampening.'\n    )\n    parser.add_argument(\n        '--nearest', action='store_true',\n        help='Whether to run the RTN baseline.'\n    ) \n    parser.add_argument(\n        '--wbits', type=int, default=16, choices=[2, 3, 4, 8, 16],\n        help='#bits to use for quantization; use 16 for evaluating base model.'\n    )\n    parser.add_argument(\n        '--trits', action='store_true',\n        help='Whether to use trits for quantization.'\n    )\n    parser.add_argument(\n        '--groupsize', type=int, default=-1,\n        help='Groupsize to use for quantization; default uses full row.'\n    )\n    parser.add_argument(\n        '--save', type=str, default='',\n        help='Save quantized checkpoint under this name.'\n    )\n    parser.add_argument(\n        '--save_safetensors', type=str, default='',\n        help='Save quantized `.safetensors` checkpoint under this name.'\n    )\n    parser.add_argument(\n        '--load', type=str, default='',\n        help='Load quantized model.'\n    )\n    parser.add_argument(\n        '--benchmark', type=int, default=0,\n        help='Number of tokens to use for benchmarking.'\n    )\n    parser.add_argument(\n        '--check', action='store_true',\n        help='Whether to compute perplexity during benchmarking for verification.'\n    )\n    parser.add_argument(\n        '--sym', action='store_true',\n        help='Whether to perform symmetric quantization.'\n    )\n    parser.add_argument(\n 
       '--act-order', action='store_true',\n        help='Whether to apply the activation order GPTQ heuristic.'\n    )\n    parser.add_argument(\n        '--true-sequential', action='store_true',\n        help='Whether to run in true sequential mode.'\n    )\n    parser.add_argument(\n        '--new-eval', action='store_true',\n        help='Whether to use the new PTB and C4 eval.'\n    )\n    parser.add_argument(\n        '--faster-kernel', action='store_true',\n        help='Whether to use the new faster kernel for benchmarking.'\n    )\n    args = parser.parse_args()\n\n    if type(args.load) is not str:\n        args.load = args.load.as_posix()\n\n    if args.load:\n        model = load_quant(args.model, args.load, args.wbits, args.groupsize, args.faster_kernel)\n    else:\n        model = get_llama(args.model)\n        model.eval()\n\n    dataloader, testloader = get_loaders(\n        args.dataset, nsamples=args.nsamples, seed=args.seed, model=args.model, seqlen=model.seqlen\n    )\n\n    if not args.load and args.wbits < 16 and not args.nearest:\n        tick = time.time()\n        quantizers = llama_sequential(model, dataloader, DEV)\n        print(time.time() - tick)\n\n    if args.benchmark:\n        gpus = [torch.device('cuda:%d' % i) for i in range(torch.cuda.device_count())]\n        if len(gpus) > 1:\n            llama_multigpu(model, gpus)\n        else:\n            model = model.to(DEV)\n        input_ids = next(iter(dataloader))[0][:, :args.benchmark]\n        benchmark(model, input_ids, check=args.check)\n    if args.load:\n        exit()\n\n    datasets = ['wikitext2']\n    if args.new_eval:\n        datasets = ['wikitext2', 'ptb-new', 'c4-new']\n    for dataset in datasets:\n        dataloader, testloader = get_loaders(\n            dataset, seed=args.seed, model=args.model, seqlen=model.seqlen\n        )\n        print(dataset)\n        llama_eval(model, testloader, DEV)\n\n    if args.save:\n        llama_pack(model, quantizers, args.wbits, args.groupsize)\n        torch.save(model.state_dict(), args.save)\n\n    if args.save_safetensors:\n        llama_pack(model, quantizers, args.wbits, args.groupsize)\n        from safetensors.torch import save_file as safe_save\n        safe_save(model.state_dict(), args.save_safetensors)\n"
  },
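`llama_sequential` and `llama_eval` above both use the same trick to collect first-layer inputs without running the whole network: wrap layer 0 in a `Catcher` that records its input and then raises, and swallow the exception at the call site. A self-contained toy version:

```python
import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 4))
captured = []

class Catcher(nn.Module):
    def __init__(self, module):
        super().__init__()
        self.module = module
    def forward(self, inp, **kwargs):
        captured.append(inp.detach())   # record the incoming hidden states
        raise ValueError                # sentinel: abort the forward pass

model[0] = Catcher(model[0])
for _ in range(3):
    try:
        model(torch.randn(1, 4))
    except ValueError:
        pass                            # expected; deeper layers never ran
model[0] = model[0].module              # unwrap again

print(len(captured), captured[0].shape)  # 3 torch.Size([1, 4])
```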
  {
    "path": "gptq/llama_inference.py",
    "content": "import time\n\nimport torch\nimport torch.nn as nn\n\nfrom gptq.gptq import *\nfrom gptq.modelutils import *\nfrom gptq.quant import *\n\nfrom transformers import AutoTokenizer\n\nDEV = torch.device('cuda:0')\n\ndef get_llama(model):\n    import torch\n    def skip(*args, **kwargs):\n        pass\n    torch.nn.init.kaiming_uniform_ = skip\n    torch.nn.init.uniform_ = skip\n    torch.nn.init.normal_ = skip\n    from transformers import LlamaForCausalLM\n    model = LlamaForCausalLM.from_pretrained(model, torch_dtype='auto')\n    model.seqlen = 2048\n    return model\n\ndef load_quant(model, checkpoint, wbits, groupsize):\n    from transformers import LlamaConfig, LlamaForCausalLM \n    config = LlamaConfig.from_pretrained(model)\n    def noop(*args, **kwargs):\n        pass\n    torch.nn.init.kaiming_uniform_ = noop \n    torch.nn.init.uniform_ = noop \n    torch.nn.init.normal_ = noop \n\n    torch.set_default_dtype(torch.half)\n    transformers.modeling_utils._init_weights = False\n    torch.set_default_dtype(torch.half)\n    model = LlamaForCausalLM(config)\n    torch.set_default_dtype(torch.float)\n    model = model.eval()\n    layers = find_layers(model)\n    for name in ['lm_head']:\n        if name in layers:\n            del layers[name]\n    make_quant(model, layers, wbits, groupsize)\n\n    print('Loading model ...')\n    if checkpoint.endswith('.safetensors'):\n        from safetensors.torch import load_file as safe_load\n        model.load_state_dict(safe_load(checkpoint))\n    else:\n        model.load_state_dict(torch.load(checkpoint))\n    model.seqlen = 2048\n    print('Done.')\n\n    return model\n\nif __name__ == '__main__':\n    import argparse\n    from datautils import *\n\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument(\n        'model', type=str,\n        help='llama model to load'\n    )\n    parser.add_argument(\n        '--wbits', type=int, default=16, choices=[2, 3, 4, 8, 16],\n        help='#bits to use for quantization; use 16 for evaluating base model.'\n    )\n    parser.add_argument(\n        '--groupsize', type=int, default=-1,\n        help='Groupsize to use for quantization; default uses full row.'\n    )\n    parser.add_argument(\n        '--load', type=str, default='',\n        help='Load quantized model.'\n    )\n\n    parser.add_argument(\n        '--text', type=str,\n        help='input text'\n    )\n    \n    parser.add_argument(\n        '--min_length', type=int, default=10,\n        help='The minimum length of the sequence to be generated.'\n    )\n    \n    parser.add_argument(\n        '--max_length', type=int, default=1024,\n        help='The maximum length of the sequence to be generated.'\n    )\n    \n    parser.add_argument(\n        '--top_p', type=float , default=0.95,\n        help='If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation.'\n    )\n    \n    parser.add_argument(\n        '--temperature', type=float, default=0.8,\n        help='The value used to module the next token probabilities.'\n    )\n    \n    args = parser.parse_args()\n\n    if type(args.load) is not str:\n        args.load = args.load.as_posix()\n    \n    if args.load:\n        model = load_quant(args.model, args.load, args.wbits, args.groupsize)\n    else:\n        model = get_llama(args.model)\n        model.eval()\n        \n    model.to(DEV)\n    tokenizer = AutoTokenizer.from_pretrained(args.model)\n\n    print(\"Human:\")\n    line = 
input()\n    while line:\n        inputs = 'Human: ' + line.strip() + '\\n\\nAssistant:'\n        input_ids = tokenizer.encode(inputs, return_tensors=\"pt\").to(DEV)\n\n        with torch.no_grad():\n            generated_ids = model.generate(\n                input_ids,\n                do_sample=True,\n                min_length=args.min_length,\n                max_length=args.max_length,\n                top_p=args.top_p,\n                temperature=args.temperature,\n            )\n        print(\"Assistant:\\n\") \n        print(tokenizer.decode([el.item() for el in generated_ids[0]]))\n        print(\"\\n-------------------------------\\n\")\n        line = input()\n"
  },
  {
    "path": "gptq/modelutils.py",
    "content": "import torch\nimport torch.nn as nn\n\n\nDEV = torch.device('cuda:0')\n\n\ndef find_layers(module, layers=[nn.Conv2d, nn.Linear], name=''):\n    if type(module) in layers:\n        return {name: module}\n    res = {}\n    for name1, child in module.named_children():\n        res.update(find_layers(\n            child, layers=layers, name=name + '.' + name1 if name != '' else name1\n        ))\n    return res\n"
  },
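`find_layers` above walks a module tree and returns a dotted-name -> module dict for every `nn.Linear`/`nn.Conv2d` leaf, which is exactly the set of layers the GPTQ passes quantize. A quick usage sketch on a toy model (assumes the repo root is on `PYTHONPATH`):

```python
import torch.nn as nn
from gptq.modelutils import find_layers

toy = nn.Sequential(
    nn.Linear(8, 8),
    nn.Sequential(nn.ReLU(), nn.Linear(8, 2)),
)
print(find_layers(toy).keys())
# dict_keys(['0', '1.1']) -- dotted paths; the ReLU is skipped
```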
  {
    "path": "gptq/quant.py",
    "content": "import numpy as np\nimport torch\nimport torch.nn as nn\nimport math\n\ndef quantize(x, scale, zero, maxq):\n    q = torch.clamp(torch.round(x / scale) + zero, 0, maxq)\n    return scale * (q - zero)\n\nclass Quantizer(nn.Module):\n\n    def __init__(self, shape=1):\n        super(Quantizer, self).__init__()\n        self.register_buffer('maxq', torch.tensor(0))\n        self.register_buffer('scale', torch.zeros(shape))\n        self.register_buffer('zero', torch.zeros(shape))\n\n    def configure(\n            self,\n            bits, perchannel=False, sym=True, \n            mse=False, norm=2.4, grid=100, maxshrink=.8\n        ):\n        self.maxq = torch.tensor(2 ** bits - 1)\n        self.perchannel = perchannel\n        self.sym = sym\n        self.mse = mse\n        self.norm = norm\n        self.grid = grid\n        self.maxshrink = maxshrink \n\n    def find_params(self, x, weight=False):\n        dev = x.device\n        self.maxq = self.maxq.to(dev)\n\n        shape = x.shape\n        if self.perchannel:\n            if weight:\n                x = x.flatten(1)\n            else:\n                if len(shape) == 4:\n                    x = x.permute([1, 0, 2, 3])\n                    x = x.flatten(1)\n                if len(shape) == 3:\n                    x = x.reshape((-1, shape[-1])).t()\n                if len(shape) == 2:\n                    x = x.t()\n        else:\n            x = x.flatten().unsqueeze(0)\n\n        tmp = torch.zeros(x.shape[0], device=dev)\n        xmin = torch.minimum(x.min(1)[0], tmp)\n        xmax = torch.maximum(x.max(1)[0], tmp)\n\n        if self.sym:\n            xmax = torch.maximum(torch.abs(xmin), xmax)\n            tmp = xmin < 0\n            if torch.any(tmp):\n                xmin[tmp] = -xmax[tmp]\n        tmp = (xmin == 0) & (xmax == 0)\n        xmin[tmp] = -1\n        xmax[tmp] = +1\n\n        self.scale = (xmax - xmin) / self.maxq\n        if self.sym:\n            self.zero = torch.full_like(self.scale, (self.maxq + 1) / 2)\n        else:\n            self.zero = torch.round(-xmin / self.scale)\n\n        if self.mse:\n            best = torch.full([x.shape[0]], float('inf'), device=dev)\n            for i in range(int(self.maxshrink * self.grid)):\n                p = 1 - i / self.grid \n                xmin1 = p * xmin\n                xmax1 = p * xmax\n                scale1 = (xmax1 - xmin1) / self.maxq\n                zero1 = torch.round(-xmin1 / scale1) if not self.sym else self.zero\n                q = quantize(x, scale1.unsqueeze(1), zero1.unsqueeze(1), self.maxq)\n                q -= x\n                q.abs_()\n                q.pow_(self.norm)\n                err = torch.sum(q, 1)\n                tmp = err < best\n                if torch.any(tmp):\n                    best[tmp] = err[tmp]\n                    self.scale[tmp] = scale1[tmp]\n                    self.zero[tmp] = zero1[tmp]\n        if not self.perchannel:\n            if weight:\n                tmp = shape[0]\n            else:\n                tmp = shape[1] if len(shape) != 3 else shape[2]\n            self.scale = self.scale.repeat(tmp)\n            self.zero = self.zero.repeat(tmp)\n\n        if weight:\n            shape = [-1] + [1] * (len(shape) - 1)\n            self.scale = self.scale.reshape(shape)\n            self.zero = self.zero.reshape(shape)\n            return\n        if len(shape) == 4:\n            self.scale = self.scale.reshape((1, -1, 1, 1))\n            self.zero = self.zero.reshape((1, -1, 1, 1))\n        if 
len(shape) == 3:\n            self.scale = self.scale.reshape((1, 1, -1))\n            self.zero = self.zero.reshape((1, 1, -1))\n        if len(shape) == 2:\n            self.scale = self.scale.unsqueeze(0)\n            self.zero = self.zero.unsqueeze(0)\n\n    def quantize(self, x):\n        if self.ready():\n            return quantize(x, self.scale, self.zero, self.maxq)\n        return x\n\n    def enabled(self):\n        return self.maxq > 0\n\n    def ready(self):\n        return torch.all(self.scale != 0)\n\n\ntry:\n    import quant_cuda\nexcept ImportError:\n    print('CUDA extension not installed.')\n\n# Assumes infeatures and outfeatures are divisible by 256 (256 * 256 blocks).\nclass QuantLinear(nn.Module):\n    def __init__(self, bits, groupsize, infeatures, outfeatures):\n        super().__init__()\n        if bits not in [2, 3, 4, 8]:\n            raise NotImplementedError(\"Only 2,3,4,8 bits are supported.\")\n        self.infeatures = infeatures\n        self.outfeatures = outfeatures\n        self.bits = bits\n        if groupsize != -1 and (groupsize < 32 or groupsize != int(math.pow(2, int(math.log2(groupsize))))):\n            raise NotImplementedError(\"groupsize must be -1 or a power of 2 no smaller than 32 (e.g. 32, 64, 128).\")\n        groupsize = groupsize if groupsize != -1 else infeatures\n        self.groupsize = groupsize\n        self.register_buffer('qzeros', torch.zeros((math.ceil(infeatures / groupsize), outfeatures // 256 * (bits * 8)), dtype=torch.int))\n        self.register_buffer('scales', torch.zeros((math.ceil(infeatures / groupsize), outfeatures)))\n        self.register_buffer('bias', torch.zeros(outfeatures))\n        self.register_buffer(\n            'qweight', torch.zeros((infeatures // 256 * (bits * 8), outfeatures), dtype=torch.int)\n        )\n        self._initialized_quant_state = False\n\n    def pack(self, linear, scales, zeros):\n        scales = scales.t().contiguous()\n        zeros = zeros.t().contiguous()\n        scale_zeros = zeros * scales\n        self.scales = scales.clone()\n        if linear.bias is not None:\n            self.bias = linear.bias.clone()\n\n        intweight = []\n        for idx in range(self.infeatures):\n            g_idx = idx // self.groupsize\n            intweight.append(torch.round((linear.weight.data[:, idx] + scale_zeros[g_idx]) / self.scales[g_idx]).to(torch.int)[:, None])\n        intweight = torch.cat(intweight, dim=1)\n        intweight = intweight.t().contiguous()\n        intweight = intweight.numpy().astype(np.uint32)\n        qweight = np.zeros(\n            (intweight.shape[0] // 256 * (self.bits * 8), intweight.shape[1]), dtype=np.uint32\n        )\n        i = 0\n        row = 0\n        while row < qweight.shape[0]:\n            if self.bits in [2, 4, 8]:\n                for j in range(i, i + (32 // self.bits)):\n                    qweight[row] |= intweight[j] << (self.bits * (j - i))\n                i += 32 // self.bits\n                row += 1\n            elif self.bits == 3:\n                # 3-bit packing: every 3 words hold 32 values; two values\n                # straddle word boundaries (bits 30/31 spill into the next word).\n                for j in range(i, i + 10):\n                    qweight[row] |= intweight[j] << (3 * (j - i))\n                i += 10\n                qweight[row] |= intweight[i] << 30\n                row += 1\n                qweight[row] |= (intweight[i] >> 2) & 1\n                i += 1\n                for j in range(i, i + 10):\n                    qweight[row] |= intweight[j] << (3 * (j - i) + 1)\n                i += 10\n                qweight[row] |= intweight[i] << 31\n                row += 1\n                qweight[row] |= (intweight[i] >> 1) & 0x3\n                i += 1\n                for j in range(i, i + 10):\n                    qweight[row] |= intweight[j] << (3 * (j - i) + 2)\n                i += 10\n                row += 1\n            else:\n                raise NotImplementedError(\"Only 2,3,4,8 bits are supported.\")\n\n        qweight = qweight.astype(np.int32)\n        self.qweight = torch.from_numpy(qweight)\n\n        # Zero-points are stored offset by -1; the CUDA kernels add the 1 back.\n        zeros -= 1\n        zeros = zeros.numpy().astype(np.uint32)\n        qzeros = np.zeros((zeros.shape[0], zeros.shape[1] // 256 * (self.bits * 8)), dtype=np.uint32)\n        i = 0\n        col = 0\n        while col < qzeros.shape[1]:\n            if self.bits in [2, 4, 8]:\n                for j in range(i, i + (32 // self.bits)):\n                    qzeros[:, col] |= zeros[:, j] << (self.bits * (j - i))\n                i += 32 // self.bits\n                col += 1\n            elif self.bits == 3:\n                for j in range(i, i + 10):\n                    qzeros[:, col] |= zeros[:, j] << (3 * (j - i))\n                i += 10\n                qzeros[:, col] |= zeros[:, i] << 30\n                col += 1\n                qzeros[:, col] |= (zeros[:, i] >> 2) & 1\n                i += 1\n                for j in range(i, i + 10):\n                    qzeros[:, col] |= zeros[:, j] << (3 * (j - i) + 1)\n                i += 10\n                qzeros[:, col] |= zeros[:, i] << 31\n                col += 1\n                qzeros[:, col] |= (zeros[:, i] >> 1) & 0x3\n                i += 1\n                for j in range(i, i + 10):\n                    qzeros[:, col] |= zeros[:, j] << (3 * (j - i) + 2)\n                i += 10\n                col += 1\n            else:\n                raise NotImplementedError(\"Only 2,3,4,8 bits are supported.\")\n\n        qzeros = qzeros.astype(np.int32)\n        self.qzeros = torch.from_numpy(qzeros)\n\n    def forward(self, x):\n        intermediate_dtype = torch.float32\n\n        if not self._initialized_quant_state:\n            # Do we even have a bias? Check for at least one non-zero element.\n            if self.bias is not None and bool(torch.any(self.bias != 0)):\n                # Then make sure it's the right type.\n                self.bias.data = self.bias.data.to(intermediate_dtype)\n            else:\n                self.bias = None\n            self._initialized_quant_state = True\n\n        outshape = list(x.shape)\n        outshape[-1] = self.outfeatures\n        x = x.reshape(-1, x.shape[-1])\n        if self.bias is None:\n            y = torch.zeros(x.shape[0], outshape[-1], dtype=intermediate_dtype, device=x.device)\n        else:\n            y = self.bias.clone().repeat(x.shape[0], 1)\n\n        output_dtype = x.dtype\n        x = x.to(intermediate_dtype)\n        if self.bits == 2:\n            quant_cuda.vecquant2matmul(x, self.qweight, y, self.scales, self.qzeros, self.groupsize)\n        elif self.bits == 3:\n            quant_cuda.vecquant3matmul(x, self.qweight, y, self.scales, self.qzeros, self.groupsize)\n        elif self.bits == 4:\n            quant_cuda.vecquant4matmul(x, self.qweight, y, self.scales, self.qzeros, self.groupsize)\n        elif self.bits == 8:\n            quant_cuda.vecquant8matmul(x, self.qweight, y, self.scales, self.qzeros, self.groupsize)\n        else:\n            raise NotImplementedError(\"Only 2,3,4,8 bits are supported.\")\n        y = y.to(output_dtype)\n        return y.reshape(outshape)\n\ndef make_quant(module, names, bits, groupsize, name=''):\n    if isinstance(module, QuantLinear):\n        return\n    for attr in dir(module):\n        tmp = getattr(module, attr)\n        name1 = name + '.' + attr if name != '' else attr\n        if name1 in names:\n            setattr(\n                module, attr, QuantLinear(bits, groupsize, tmp.in_features, tmp.out_features)\n            )\n    for name1, child in module.named_children():\n        make_quant(child, names, bits, groupsize, name + '.' + name1 if name != '' else name1)\n"
  },
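  {
    "path": "examples/quant_roundtrip.py",
    "content": "\"\"\"Illustrative sketch (file name and sizes are made up, not part of the build): round-trips a random weight matrix through the per-channel asymmetric quantization implemented by Quantizer.find_params and quantize in gptq/quant.py, then reports the reconstruction error.\"\"\"\nimport torch\n\nfrom quant import Quantizer, quantize\n\nw = torch.randn(128, 64)  # fake (out_features, in_features) weight\n\nq = Quantizer()\nq.configure(4, perchannel=True, sym=False, mse=False)\nq.find_params(w, weight=True)  # one (scale, zero) pair per output row\n\nw_hat = quantize(w, q.scale, q.zero, q.maxq)  # maxq == 2**4 - 1\nprint('mean abs error:', (w - w_hat).abs().mean().item())\n"
  },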
  {
    "path": "gptq/quant_cuda.cpp",
    "content": "#include <torch/all.h>\n#include <torch/python.h>\n#include <c10/cuda/CUDAGuard.h>\n\nvoid vecquant2matmul_cuda(\n  torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,\n  torch::Tensor scales, torch::Tensor zeros,\n  int groupsize\n); \n\nvoid vecquant2matmul(\n  torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,\n  torch::Tensor scales, torch::Tensor zeros,\n  int groupsize\n) {\n  const at::cuda::OptionalCUDAGuard device_guard(device_of(vec));\n  vecquant2matmul_cuda(vec, mat, mul, scales, zeros,groupsize);\n}\n\nvoid vecquant3matmul_cuda(\n  torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,\n  torch::Tensor scales, torch::Tensor zeros,\n  int groupsize\n); \n\nvoid vecquant3matmul(\n  torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,\n  torch::Tensor scales, torch::Tensor zeros,\n  int groupsize\n) {\n  const at::cuda::OptionalCUDAGuard device_guard(device_of(vec));\n  vecquant3matmul_cuda(vec, mat, mul, scales, zeros, groupsize);\n}\n\nvoid vecquant4matmul_cuda(\n  torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,\n  torch::Tensor scales, torch::Tensor zeros,\n  int groupsize\n); \n\nvoid vecquant4matmul(\n  torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,\n  torch::Tensor scales, torch::Tensor zeros,\n  int groupsize\n) {\n  const at::cuda::OptionalCUDAGuard device_guard(device_of(vec));\n  vecquant4matmul_cuda(vec, mat, mul, scales, zeros, groupsize);\n}\n\nvoid vecquant8matmul_cuda(\n  torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,\n  torch::Tensor scales, torch::Tensor zeros,\n  int groupsize\n); \n\nvoid vecquant8matmul(\n  torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,\n  torch::Tensor scales, torch::Tensor zeros,\n  int groupsize\n) {\n  const at::cuda::OptionalCUDAGuard device_guard(device_of(vec));\n  vecquant8matmul_cuda(vec, mat, mul, scales, zeros, groupsize);\n}\n\nPYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {\n  m.def(\"vecquant2matmul\", &vecquant2matmul, \"Vector 2-bit Quantized Matrix Multiplication (CUDA)\");\n  m.def(\"vecquant3matmul\", &vecquant3matmul, \"Vector 3-bit Quantized Matrix Multiplication (CUDA)\");\n  m.def(\"vecquant4matmul\", &vecquant4matmul, \"Vector 4-bit Quantized Matrix Multiplication (CUDA)\");\n  m.def(\"vecquant8matmul\", &vecquant8matmul, \"Vector 8-bit Quantized Matrix Multiplication (CUDA)\");\n}\n"
  },
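  {
    "path": "examples/call_quant_cuda.py",
    "content": "\"\"\"Illustrative sketch (shapes and values are made up) of driving the quant_cuda bindings directly, mirroring what QuantLinear.forward passes to vecquant4matmul: float32 activations, int32-packed weights and zero-points, and an output buffer accumulated in place. Uses groupsize == M, i.e. a single quantization group. Requires the built extension and a CUDA device.\"\"\"\nimport torch\n\nimport quant_cuda\n\nB, M, N, bits = 4, 4096, 4096, 4\nDEV = torch.device('cuda:0')\n\nvec = torch.randn(B, M, dtype=torch.float, device=DEV)\nmat = torch.randint(-1000000000, 1000000000, (M // 32 * bits, N), dtype=torch.int, device=DEV)\nscales = torch.randn(1, N, dtype=torch.float, device=DEV)\nqzeros = torch.randint(-1000000000, 1000000000, (1, N // 32 * bits), dtype=torch.int, device=DEV)\nmul = torch.zeros(B, N, dtype=torch.float, device=DEV)\n\nquant_cuda.vecquant4matmul(vec, mat, mul, scales, qzeros, M)\nprint(mul.shape)  # torch.Size([4, 4096])\n"
  },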
  {
    "path": "gptq/quant_cuda_kernel.cu",
    "content": "#include <torch/all.h>\n#include <torch/python.h>\n#include <cuda.h>\n#include <cuda_runtime.h>\n\n// atomicAdd for double-precision floating-point numbers on hardware with\n// compute capability < 6.0 from:\n// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions\n#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600\n__device__ double atomicAdd(\n    double* address,\n    double val\n) {\n  unsigned long long int* address_as_ull = (unsigned long long int*)address;\n  unsigned long long int old = *address_as_ull, assumed;\n\n  do {\n    assumed = old;\n    old = atomicCAS(\n      address_as_ull,\n      assumed,\n      __double_as_longlong(val + __longlong_as_double(assumed))\n    );\n\n  // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)\n  } while (assumed != old);\n\n  return __longlong_as_double(old);\n}\n#endif\n\ntemplate <typename scalar_t>\n__global__ void VecQuant2MatMulKernel(\n    const  scalar_t* __restrict__ vec,\n    const       int* __restrict__ mat,\n           scalar_t* __restrict__ mul,\n    const  scalar_t* __restrict__ scales,\n    const  \t\tint* __restrict__ zeros,\n    int batch,\n    int vec_height, \t\n    int height,\n    int width,\n    int zero_width,\n    int groupsize\n);\n\ntemplate <typename scalar_t>\n__global__ void VecQuant3MatMulKernel(\n    const  scalar_t* __restrict__ vec,\n    const       int* __restrict__ mat,\n           scalar_t* __restrict__ mul,\n    const  scalar_t* __restrict__ scales,\n    const  \tint* __restrict__ zeros,\n    int batch,\n    int vec_height, \t\n    int height,\n    int width,\n    int zero_width,\n    int groupsize\n);\n\ntemplate <typename scalar_t>\n__global__ void VecQuant4MatMulKernel(\n    const  scalar_t* __restrict__ vec,\n    const       int* __restrict__ mat,\n           scalar_t* __restrict__ mul,\n    const  scalar_t* __restrict__ scales,\n    const  \tint* __restrict__ zeros,\n    int batch,\n    int vec_height, \t\n    int height,\n    int width,\n    int zero_width,\n    int groupsize\n);\n\ntemplate <typename scalar_t>\n__global__ void VecQuant8MatMulKernel(\n    const  scalar_t* __restrict__ vec,\n    const       int* __restrict__ mat,\n           scalar_t* __restrict__ mul,\n    const  scalar_t* __restrict__ scales,\n    const  \tint* __restrict__ zeros,\n    int batch,\n    int vec_height, \t\n    int height,\n    int width,\n    int zero_width,\n    int groupsize\n);\n\nconst int BLOCKWIDTH  = 256;\nconst int BLOCKHEIGHT2 =  16;\nconst int BLOCKHEIGHT3 =  24;\nconst int BLOCKHEIGHT4 =  32; \nconst int BLOCKHEIGHT8 =  64;\n\n__device__ inline unsigned int as_unsigned(int i) {\n  return *reinterpret_cast<unsigned int*>(&i);\n}\n\nvoid vecquant2matmul_cuda(\n  torch::Tensor vec,\n  torch::Tensor mat,\n  torch::Tensor mul,\n  torch::Tensor scales,\n  torch::Tensor zeros,\n  int groupsize\n) {\n  int batch = vec.size(0);\n  int vec_height = vec.size(1);\n  int height = mat.size(0);\n  int width = mat.size(1);\n  int zero_width = zeros.size(1);\n\n  dim3 blocks(\n    (height + BLOCKHEIGHT2 - 1) / BLOCKHEIGHT2,\n    (width + BLOCKWIDTH - 1) / BLOCKWIDTH,\n    batch\n  );\n  dim3 threads(BLOCKWIDTH);\n\n  AT_DISPATCH_FLOATING_TYPES(\n    vec.type(), \"vecquant2matmul_cuda\", ([&] {\n      VecQuant2MatMulKernel<<<blocks, threads>>>(\n        vec.data<scalar_t>(), mat.data<int>(), mul.data<scalar_t>(),\n        scales.data<scalar_t>(), zeros.data<int>(),\n        batch, vec_height, height, width, zero_width, groupsize\n      );\n    })\n  
);\n}\n\ntemplate <typename scalar_t>\n__global__ void VecQuant2MatMulKernel(\n    const  scalar_t* __restrict__ vec,\n    const       int* __restrict__ mat,\n           scalar_t* __restrict__ mul,\n    const  scalar_t* __restrict__ scales,\n    const  \tint* __restrict__ zeros,\n    int batch,\n    int vec_height,\n    int height,\n    int width,\n    int zero_width,\n    int groupsize\n) {\n  int b = blockIdx.z;\n  int h = BLOCKHEIGHT2 * blockIdx.x;\n  int w = BLOCKWIDTH * blockIdx.y + threadIdx.x;\n\n  __shared__ scalar_t blockvec[BLOCKWIDTH];\n  blockvec[threadIdx.x] = vec[b * vec_height + blockIdx.x * BLOCKWIDTH + threadIdx.x];\n  __syncthreads();\n\n  scalar_t res = 0;\n  int i = width * h + w;\n  int g_h = h * 16;\n  int k = 0;\n  \n  int z_w = w / 16; \n  int z_mod = (w % 16) * 2;\n\n  unsigned int tmp;\n\n  while (k < BLOCKWIDTH) {\n    tmp = as_unsigned(mat[i]);\n\t\n    int g = (g_h + k) / groupsize;\n    scalar_t scale = scales[g * width + w];\n    scalar_t zero = scale * scalar_t((as_unsigned(zeros[g * zero_width + z_w]) >> z_mod & 0x3) + 1);\n\t\n    res += (scale * scalar_t((tmp >> 0) & 0x3) - zero) * blockvec[k + 0];\n    res += (scale * scalar_t((tmp >> 2) & 0x3) - zero) * blockvec[k + 1];\n    res += (scale * scalar_t((tmp >> 4) & 0x3) - zero) * blockvec[k + 2];\n    res += (scale * scalar_t((tmp >> 6) & 0x3) - zero) * blockvec[k + 3];\n    res += (scale * scalar_t((tmp >> 8) & 0x3) - zero) * blockvec[k + 4];\n    res += (scale * scalar_t((tmp >> 10) & 0x3) - zero) * blockvec[k + 5];\n    res += (scale * scalar_t((tmp >> 12) & 0x3) - zero) * blockvec[k + 6];\n    res += (scale * scalar_t((tmp >> 14) & 0x3) - zero) * blockvec[k + 7];\n    res += (scale * scalar_t((tmp >> 16) & 0x3) - zero) * blockvec[k + 8];\n    res += (scale * scalar_t((tmp >> 18) & 0x3) - zero) * blockvec[k + 9];\n    res += (scale * scalar_t((tmp >> 20) & 0x3) - zero) * blockvec[k + 10];\n    res += (scale * scalar_t((tmp >> 22) & 0x3) - zero) * blockvec[k + 11];\n    res += (scale * scalar_t((tmp >> 24) & 0x3) - zero) * blockvec[k + 12];\n    res += (scale * scalar_t((tmp >> 26) & 0x3) - zero) * blockvec[k + 13];\n    res += (scale * scalar_t((tmp >> 28) & 0x3) - zero) * blockvec[k + 14];\n    res += (scale * scalar_t((tmp >> 30) & 0x3) - zero) * blockvec[k + 15];\n\t\n    i += width;\n    k += 16;\n  }\n\n  atomicAdd(&mul[b * width + w], res);\n}\n\nvoid vecquant3matmul_cuda(\n  torch::Tensor vec,\n  torch::Tensor mat,\n  torch::Tensor mul,\n  torch::Tensor scales,\n  torch::Tensor zeros,\n  int groupsize\n) {\n  int batch = vec.size(0);\n  int vec_height = vec.size(1);\n  int height = mat.size(0);\n  int width = mat.size(1);\n  int zero_width = zeros.size(1);\n\n  dim3 blocks(\n    (height + BLOCKHEIGHT3 - 1) / BLOCKHEIGHT3,\n    (width + BLOCKWIDTH - 1) / BLOCKWIDTH,\n    batch\n  );\n  dim3 threads(BLOCKWIDTH);\n\n  AT_DISPATCH_FLOATING_TYPES(\n    vec.type(), \"vecquant3matmul_cuda\", ([&] {\n      VecQuant3MatMulKernel<<<blocks, threads>>>(\n        vec.data<scalar_t>(), mat.data<int>(), mul.data<scalar_t>(),\n        scales.data<scalar_t>(), zeros.data<int>(),\n        batch, vec_height, height, width, zero_width, groupsize\n      );\n    })\n  );\n}\n\ntemplate <typename scalar_t>\n__global__ void VecQuant3MatMulKernel(\n    const  scalar_t* __restrict__ vec,\n    const       int* __restrict__ mat,\n           scalar_t* __restrict__ mul,\n    const  scalar_t* __restrict__ scales,\n    const  \tint* __restrict__ zeros,\n    int batch,\n    int vec_height,\n    int height,\n    int width,\n    
int zero_width,\n    int groupsize\n) {\n  int b = blockIdx.z;\n  int h = BLOCKHEIGHT3 * blockIdx.x;\n  int w = BLOCKWIDTH * blockIdx.y + threadIdx.x;\n\n  __shared__ scalar_t blockvec[BLOCKWIDTH];\n  blockvec[threadIdx.x] = vec[b * vec_height + blockIdx.x * BLOCKWIDTH + threadIdx.x];\n  __syncthreads();\n\n  scalar_t res = 0;\n  int i = width * h + w;\n  int g_h = (h / 3) * 32;\n  int k = 0;\n  \n  int z_w = (w / 32) * 3; // ((w / 256) * 24) / 3 \n  int z_mod = w % 32;\n  int z_bit;\n  \n  if (z_mod != 10){\n    if (z_mod != 21){\n      z_bit = z_mod;\n      if (z_bit > 21){\n        z_bit -= 22;\n        z_bit *= 3;\n        z_bit += 2;\n        z_w += 2;\n      } else if (z_bit > 10){\n        z_bit -= 11;\n        z_bit *= 3;\n        z_bit += 1;\n        z_w += 1;\n      } else {\n        z_bit *= 3;\n      }\n    } else {\n      z_w += 1;\n    }\n  }\n \n  unsigned int tmp1;\n  unsigned int tmp2;\n  unsigned int tmp;\n  unsigned int z_tmp;\n\n  while (k < BLOCKWIDTH) {\n    tmp1 = as_unsigned(mat[i]);\n\t\n    int g = (g_h + k) / groupsize;\n    scalar_t scale = scales[g * width + w];\n    scalar_t zero;\n    if (z_mod == 10) {\n      z_tmp = (as_unsigned(zeros[g * zero_width + z_w]) >> 30) | ((as_unsigned(zeros[g * zero_width + (z_w + 1)]) << 2) & 0x4);\n      zero = scale * scalar_t((z_tmp) + 1);\n    } else if (z_mod == 21){\n      z_tmp = (as_unsigned(zeros[g * zero_width + z_w]) >> 31) | ((as_unsigned(zeros[g * zero_width + (z_w + 1)]) << 1) & 0x6);\n      zero = scale * scalar_t((z_tmp) + 1);\n    } else {\n      zero = scale * scalar_t(((as_unsigned(zeros[g * zero_width + z_w]) >> z_bit) & 0x7) + 1);\n    }\n\t\n    res += (scale * scalar_t((tmp1 >>  0) & 0x7) - zero) * blockvec[k + 0];\n    res += (scale * scalar_t((tmp1 >>  3) & 0x7) - zero) * blockvec[k + 1];\n    res += (scale * scalar_t((tmp1 >>  6) & 0x7) - zero) * blockvec[k + 2];\n    res += (scale * scalar_t((tmp1 >>  9) & 0x7) - zero) * blockvec[k + 3];\n    res += (scale * scalar_t((tmp1 >> 12) & 0x7) - zero) * blockvec[k + 4];\n    res += (scale * scalar_t((tmp1 >> 15) & 0x7) - zero) * blockvec[k + 5];\n    res += (scale * scalar_t((tmp1 >> 18) & 0x7) - zero) * blockvec[k + 6];\n    res += (scale * scalar_t((tmp1 >> 21) & 0x7) - zero) * blockvec[k + 7];\n    res += (scale * scalar_t((tmp1 >> 24) & 0x7) - zero) * blockvec[k + 8];\n    res += (scale * scalar_t((tmp1 >> 27) & 0x7) - zero) * blockvec[k + 9];\n\t\n    i += width;\n    tmp2 = as_unsigned(mat[i]);\n    tmp = (tmp1 >> 30) | ((tmp2 << 2) & 0x4);\n    tmp2 >>= 1;\n    res += (scale * scalar_t(tmp) - zero) * blockvec[k + 10];\n    k += 11;\n\t\n    res += (scale * scalar_t((tmp2 >>  0) & 0x7) - zero) * blockvec[k + 0];\n    res += (scale * scalar_t((tmp2 >>  3) & 0x7) - zero) * blockvec[k + 1];\n    res += (scale * scalar_t((tmp2 >>  6) & 0x7) - zero) * blockvec[k + 2];\n    res += (scale * scalar_t((tmp2 >>  9) & 0x7) - zero) * blockvec[k + 3];\n    res += (scale * scalar_t((tmp2 >> 12) & 0x7) - zero) * blockvec[k + 4];\n    res += (scale * scalar_t((tmp2 >> 15) & 0x7) - zero) * blockvec[k + 5];\n    res += (scale * scalar_t((tmp2 >> 18) & 0x7) - zero) * blockvec[k + 6];\n    res += (scale * scalar_t((tmp2 >> 21) & 0x7) - zero) * blockvec[k + 7];\n    res += (scale * scalar_t((tmp2 >> 24) & 0x7) - zero) * blockvec[k + 8];\n    res += (scale * scalar_t((tmp2 >> 27) & 0x7) - zero) * blockvec[k + 9];\n\t\n    i += width;\n    tmp1 = as_unsigned(mat[i]);\n    tmp = (tmp2 >> 30) | ((tmp1 << 1) & 0x6);\n    tmp1 >>= 2;\n    res += (scale * scalar_t(tmp) - zero) * 
blockvec[k + 10];\n    k += 11;\n\t\n    res += (scale * scalar_t((tmp1 >>  0) & 0x7) - zero) * blockvec[k + 0];\n    res += (scale * scalar_t((tmp1 >>  3) & 0x7) - zero) * blockvec[k + 1];\n    res += (scale * scalar_t((tmp1 >>  6) & 0x7) - zero) * blockvec[k + 2];\n    res += (scale * scalar_t((tmp1 >>  9) & 0x7) - zero) * blockvec[k + 3];\n    res += (scale * scalar_t((tmp1 >> 12) & 0x7) - zero) * blockvec[k + 4];\n    res += (scale * scalar_t((tmp1 >> 15) & 0x7) - zero) * blockvec[k + 5];\n    res += (scale * scalar_t((tmp1 >> 18) & 0x7) - zero) * blockvec[k + 6];\n    res += (scale * scalar_t((tmp1 >> 21) & 0x7) - zero) * blockvec[k + 7];\n    res += (scale * scalar_t((tmp1 >> 24) & 0x7) - zero) * blockvec[k + 8];\n    res += (scale * scalar_t((tmp1 >> 27) & 0x7) - zero) * blockvec[k + 9];\n\t\n    i += width;\n    k += 10;\n  }\n\n  atomicAdd(&mul[b * width + w], res);\n}\n\nvoid vecquant4matmul_cuda(\n  torch::Tensor vec,\n  torch::Tensor mat,\n  torch::Tensor mul,\n  torch::Tensor scales,\n  torch::Tensor zeros,\n  int groupsize\n) {\n  int batch = vec.size(0);\n  int vec_height = vec.size(1);\n  int height = mat.size(0);\n  int width = mat.size(1);\n  int zero_width = zeros.size(1);\n\n  dim3 blocks(\n    (height + BLOCKHEIGHT4 - 1) / BLOCKHEIGHT4,\n    (width + BLOCKWIDTH - 1) / BLOCKWIDTH,\n    batch\n  );\n  dim3 threads(BLOCKWIDTH);\n\n  AT_DISPATCH_FLOATING_TYPES(\n    vec.type(), \"vecquant4matmul_cuda\", ([&] {\n      VecQuant4MatMulKernel<<<blocks, threads>>>(\n        vec.data<scalar_t>(), mat.data<int>(), mul.data<scalar_t>(),\n        scales.data<scalar_t>(), zeros.data<int>(),\n        batch, vec_height, height, width, zero_width, groupsize\n      );\n    })\n  );\n}\n\ntemplate <typename scalar_t>\n__global__ void VecQuant4MatMulKernel(\n    const  scalar_t* __restrict__ vec,\n    const       int* __restrict__ mat,\n           scalar_t* __restrict__ mul,\n    const  scalar_t* __restrict__ scales,\n    const       int* __restrict__ zeros,\n    int batch,\n    int vec_height,\n    int height,\n    int width,\n    int zero_width,\n    int groupsize\n) {\n  int b = blockIdx.z;\n  int h = BLOCKHEIGHT4 * blockIdx.x;\n  int w = BLOCKWIDTH * blockIdx.y + threadIdx.x;\n\n  __shared__ scalar_t blockvec[BLOCKWIDTH];\n  blockvec[threadIdx.x] = vec[b * vec_height + blockIdx.x * BLOCKWIDTH + threadIdx.x];\n  __syncthreads();\n\n  scalar_t res = 0;\n  int i = width * h + w;\n  int g_h = h * 8;\n  int k = 0;\n\n  int z_w = w / 8; \n  int z_mod = (w % 8) * 4;\n\n  unsigned int tmp;\n\n  while (k < BLOCKWIDTH) {\n    tmp = as_unsigned(mat[i]);\n\t\n    int g = (g_h + k) / groupsize;\n    scalar_t scale = scales[g * width + w];\n    scalar_t zero = scale * scalar_t(((as_unsigned(zeros[g * zero_width + z_w]) >> z_mod) & 0xF) + 1);\n\t\n    res += (scale * scalar_t((tmp >> 0) & 0xF) - zero) * blockvec[k + 0];\n    res += (scale * scalar_t((tmp >> 4) & 0xF) - zero) * blockvec[k + 1];\n    res += (scale * scalar_t((tmp >> 8) & 0xF) - zero) * blockvec[k + 2];\n    res += (scale * scalar_t((tmp >> 12) & 0xF) - zero) * blockvec[k + 3];\n    res += (scale * scalar_t((tmp >> 16) & 0xF) - zero) * blockvec[k + 4];\n    res += (scale * scalar_t((tmp >> 20) & 0xF) - zero) * blockvec[k + 5];\n    res += (scale * scalar_t((tmp >> 24) & 0xF) - zero) * blockvec[k + 6];\n    res += (scale * scalar_t((tmp >> 28) & 0xF) - zero) * blockvec[k + 7];\n\t\n    i += width;\n    k += 8;\n  }\n\n  atomicAdd(&mul[b * width + w], res);\n}\n\nvoid vecquant8matmul_cuda(\n  torch::Tensor vec,\n  torch::Tensor mat,\n  
torch::Tensor mul,\n  torch::Tensor scales,\n  torch::Tensor zeros,\n  int groupsize\n) {\n  int batch = vec.size(0);\n  int vec_height = vec.size(1);\n  int height = mat.size(0);\n  int width = mat.size(1);\n  int zero_width = zeros.size(1);\n\n  dim3 blocks(\n    (height + BLOCKHEIGHT8 - 1) / BLOCKHEIGHT8,\n    (width + BLOCKWIDTH - 1) / BLOCKWIDTH,\n    batch\n  );\n  dim3 threads(BLOCKWIDTH);\n\n  AT_DISPATCH_FLOATING_TYPES(\n    vec.type(), \"vecquant8matmul_cuda\", ([&] {\n      VecQuant8MatMulKernel<<<blocks, threads>>>(\n        vec.data<scalar_t>(), mat.data<int>(), mul.data<scalar_t>(),\n        scales.data<scalar_t>(), zeros.data<int>(),\n        batch, vec_height, height, width, zero_width, groupsize\n      );\n    })\n  );\n}\n\ntemplate <typename scalar_t>\n__global__ void VecQuant8MatMulKernel(\n    const  scalar_t* __restrict__ vec,\n    const       int* __restrict__ mat,\n           scalar_t* __restrict__ mul,\n    const  scalar_t* __restrict__ scales,\n    const  \tint* __restrict__ zeros,\n    int batch,\n    int vec_height,\n    int height,\n    int width,\n    int zero_width,\n    int groupsize\n) {\n  int b = blockIdx.z;\n  int h = BLOCKHEIGHT8 * blockIdx.x;\n  int w = BLOCKWIDTH * blockIdx.y + threadIdx.x;\n\n  __shared__ scalar_t blockvec[BLOCKWIDTH];\n  blockvec[threadIdx.x] = vec[b * vec_height + blockIdx.x * BLOCKWIDTH + threadIdx.x];\n  __syncthreads();\n\n  scalar_t res = 0;\n  int i = width * h + w;\n  int g_h = h * 4;\n  int k = 0;\n  \n  int z_w = w / 4; \n  int z_mod = (w % 4) * 8;\n\n  unsigned int tmp;\n\n  while (k < BLOCKWIDTH) { \n    tmp = as_unsigned(mat[i]);\n\t\n    int g = (g_h + k) / groupsize;\n    scalar_t scale = scales[g * width + w];\n    scalar_t zero = scale * scalar_t(((as_unsigned(zeros[g * zero_width + z_w]) >> z_mod) & 0xFF) + 1);\n\t\n    res += (scale * scalar_t((tmp >> 0) & 0xFF) - zero) * blockvec[k + 0];\n    res += (scale * scalar_t((tmp >> 8) & 0xFF) - zero) * blockvec[k + 1];\n    res += (scale * scalar_t((tmp >> 16) & 0xFF) - zero) * blockvec[k + 2];\n    res += (scale * scalar_t((tmp >> 24) & 0xFF) - zero) * blockvec[k + 3];\n\t\n    i += width;\n    k += 4;\n  }\n\n  atomicAdd(&mul[b * width + w], res);\n}\n"
  },
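  {
    "path": "examples/unpack2bit_reference.py",
    "content": "\"\"\"Pure-Python reference (made-up values, not used by the build) for the bit manipulation VecQuant2MatMulKernel performs on each 32-bit word of qweight: sixteen 2-bit codes packed least-significant-first, with the stored zero-point offset by -1 in QuantLinear.pack, so dequantization adds the 1 back.\"\"\"\n\ndef unpack2(word):\n    # Extract sixteen 2-bit fields, least-significant bits first.\n    return [(word >> (2 * j)) & 0x3 for j in range(16)]\n\ndef dequant2(word, scale, packed_zero):\n    zero = scale * (packed_zero + 1)  # undo the -1 applied in pack()\n    return [scale * q - zero for q in unpack2(word)]\n\nword = 0xE4E4E4E4  # bit pattern 11 10 01 00 repeated: codes 0, 1, 2, 3 from the LSB up\nprint(unpack2(word))  # [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]\nprint(dequant2(word, scale=0.5, packed_zero=1))\n"
  },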
  {
    "path": "gptq/setup_cuda.py",
    "content": "from setuptools import setup, Extension\nfrom torch.utils import cpp_extension\n\nsetup(\n    name='quant_cuda',\n    ext_modules=[cpp_extension.CUDAExtension(\n        'quant_cuda', ['quant_cuda.cpp', 'quant_cuda_kernel.cu']\n    )],\n    cmdclass={'build_ext': cpp_extension.BuildExtension}\n)\n"
  },
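  {
    "path": "examples/check_quant_cuda.py",
    "content": "\"\"\"Quick sanity check (illustrative, not part of the build) that the quant_cuda extension compiled by gptq/setup_cuda.py (typically via `python setup_cuda.py install`) is importable and exposes the four entry points bound in gptq/quant_cuda.cpp.\"\"\"\nimport quant_cuda\n\nfor name in ('vecquant2matmul', 'vecquant3matmul', 'vecquant4matmul', 'vecquant8matmul'):\n    assert hasattr(quant_cuda, name), name\nprint('quant_cuda extension OK')\n"
  },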
  {
    "path": "gptq/test_kernel.py",
    "content": "import torch\nimport torch.nn as nn\n\nimport quant_cuda\nimport os\nos.environ['CUDA_LAUNCH_BLOCKING'] = \"1\"\n\ntorch.backends.cuda.matmul.allow_tf32 = False\ntorch.backends.cudnn.allow_tf32 = False\n\nprint('Benchmarking LLaMa-7B FC2 matvec ...')\n\nDEV = torch.device('cuda:0')\n\nB = 5\nL = 128\nM = 4096\nN = 11008\n\nDTYPE = torch.half\nmat = torch.randn((M, N), device=DEV, dtype=DTYPE)\nvec = torch.randn((B, M), device=DEV, dtype=DTYPE)\nmul = torch.zeros((B, N), device=DEV, dtype=DTYPE)\n\nCOUNT = 1000\nimport time\ntick = time.time()\nfor _ in range(COUNT):\n    torch.matmul(vec, mat, out=mul) \n    torch.cuda.synchronize()\nprint('FP16:', (time.time() - tick) / COUNT)\n\nDTYPE = torch.float\nmat = mat.to(DTYPE)\nvec = vec.to(DTYPE)\nmul = mul.to(DTYPE)\n\nmat = torch.randint(-1000000000, 1000000000, (M // 256 * 32, N), device=DEV, dtype=torch.int)\nscales = torch.randn(N, device=DEV, dtype=DTYPE)\nzeros = torch.randint(-1000000000, 1000000000, (1, N // 256 * 32), device=DEV, dtype=torch.int)\n\nCOUNT = 1000\nimport time\ntick = time.time()\nfor _ in range(COUNT):\n    quant_cuda.vecquant2matmul(vec, mat, mul, scales, zeros, M)\n    torch.cuda.synchronize()\nprint('2bit:', (time.time() - tick) / COUNT)\n\ntick = time.time()\nfor _ in range(COUNT):\n    quant_cuda.vecquant3matmul(vec, mat, mul, scales, zeros, M)\n    torch.cuda.synchronize()\nprint('3bit:', (time.time() - tick) / COUNT)\n\ntick = time.time()\nfor _ in range(COUNT):\n    quant_cuda.vecquant4matmul(vec, mat, mul, scales, zeros, M)\n    torch.cuda.synchronize()\nprint('4bit:', (time.time() - tick) / COUNT)\n\ntick = time.time()\nfor _ in range(COUNT):\n    quant_cuda.vecquant8matmul(vec, mat, mul, scales, zeros, M)\n    torch.cuda.synchronize()\nprint('8bit:', (time.time() - tick) / COUNT)\nprint('Verifiying kernel correctness ...')\n\nM = 4096\nN = 11008\n\nfrom quant import *\n\nlayer = nn.Linear(M, N)\nvec = torch.randn(B,L,M).to(DEV)\n\nquantizer = Quantizer()\nquantizer.configure(2, perchannel=True, sym=False, mse=False)\nquantizer.find_params(layer.weight.data, weight=True)\nlayer.weight.data = quantize(\n    layer.weight.data, quantizer.scale, quantizer.zero, quantizer.maxq\n)\n\nqlayer = QuantLinear(2, -1, layer.in_features, layer.out_features)\nqlayer.pack(layer, quantizer.scale, quantizer.zero)\n\nqlayer = qlayer.to(DEV)\nlayer = layer.to(DEV)\n\nwith torch.no_grad():\n    print('2bit Simu:', qlayer(vec))\n    print('2bit Kern:', layer.to(DEV)(vec))\n    print('\\n')\n\nlayer = nn.Linear(M, N)\nvec = torch.randn(B,L,M).to(DEV)\n\nquantizer = Quantizer()\nquantizer.configure(3, perchannel=True, sym=False, mse=False)\nquantizer.find_params(layer.weight.data, weight=True)\nlayer.weight.data = quantize(\n    layer.weight.data, quantizer.scale, quantizer.zero, quantizer.maxq\n)\n\nqlayer = QuantLinear(3, -1, layer.in_features, layer.out_features)\nqlayer.pack(layer, quantizer.scale, quantizer.zero)\n\nqlayer = qlayer.to(DEV)\nlayer = layer.to(DEV)\n\nwith torch.no_grad():\n    print('3bit Simu:', qlayer(vec))\n    print('3bit Kern:', layer.to(DEV)(vec))\n    print('\\n')\n\nlayer = nn.Linear(M, N)\nvec = torch.randn(B,L,M).to(DEV)\n\nquantizer = Quantizer()\nquantizer.configure(4, perchannel=True, sym=False, mse=False)\nquantizer.find_params(layer.weight.data, weight=True)\nlayer.weight.data = quantize(\n    layer.weight.data, quantizer.scale, quantizer.zero, quantizer.maxq\n)\n\nqlayer = QuantLinear(4, -1, layer.in_features, layer.out_features)\nqlayer.pack(layer, quantizer.scale, 
quantizer.zero)\n\nqlayer = qlayer.to(DEV)\nlayer = layer.to(DEV) \n\nwith torch.no_grad():\n    print('4bit Simu:', qlayer(vec))\n    print('4bit Kern:', layer.to(DEV)(vec))\n    print('\\n')\n\nlayer = nn.Linear(M, N)\nvec = torch.randn(B,L,M).to(DEV)\n\nquantizer = Quantizer()\nquantizer.configure(8, perchannel=True, sym=False, mse=False)\nquantizer.find_params(layer.weight.data, weight=True)\nlayer.weight.data = quantize(\n    layer.weight.data, quantizer.scale, quantizer.zero, quantizer.maxq\n)\n\nqlayer = QuantLinear(8, -1, layer.in_features, layer.out_features)\nqlayer.pack(layer, quantizer.scale, quantizer.zero)\n\nqlayer = qlayer.to(DEV)\nlayer = layer.to(DEV)\n\nwith torch.no_grad():\n    print('8bit Simu:', qlayer(vec))\n    print('8bit Kern:', layer.to(DEV)(vec))\n"
  },
  {
    "path": "predictors/base.py",
    "content": "import copy\nfrom abc import ABC, abstractmethod\n\n\ndef parse_codeblock(text):\n    lines = text.split(\"\\n\")\n    for i, line in enumerate(lines):\n        if \"```\" in line:\n            if line != \"```\":\n                lines[i] = f'<pre><code class=\"{lines[i][3:]}\">'\n            else:\n                lines[i] = '</code></pre>'\n        else:\n            if i > 0:\n                lines[i] = \"<br/>\" + line.replace(\"<\", \"&lt;\").replace(\n                    \">\", \"&gt;\")\n    return \"\".join(lines)\n\n\nclass BasePredictor(ABC):\n\n    @abstractmethod\n    def __init__(self, model_name, predict_mode='tuple'):\n        self.model = None\n        self.tokenizer = None\n        self.model_name = model_name\n        self.predict_mode = predict_mode\n\n    @abstractmethod\n    def stream_chat_continue(self, *args, **kwargs):\n        raise NotImplementedError\n\n    def predict_continue(self, *args, **kwargs):\n        if self.predict_mode == 'tuple':\n            yield from self.predict_continue_tuple(*args, **kwargs)\n        else:\n            yield from self.predict_continue_dict(*args, **kwargs)\n\n    def predict_continue_tuple(self, query, latest_message, max_length, top_p,\n                               temperature, allow_generate, history,\n                               last_state, *args, **kwargs):\n        last_state[0] = copy.deepcopy(history)\n        last_state[1] = query\n        last_state[2] = latest_message\n        if history is None:\n            history = []\n        allow_generate[0] = True\n        history.append((query, latest_message))\n        for response in self.stream_chat_continue(\n                self.model,\n                self.tokenizer,\n                query=query,\n                history=history,\n                max_length=max_length,\n                top_p=top_p,\n                temperature=temperature):\n            history[-1] = (history[-1][0], response)\n            history_colorful = copy.deepcopy(history)\n            colorful_response = f'<span style=\"color:red\">{latest_message}</span>{response[len(latest_message):]}'\n            history_colorful[-1] = (history_colorful[-1][0], colorful_response)\n            yield history_colorful, '', ''\n            if not allow_generate[0]:\n                break\n\n    def predict_continue_dict(self, query, latest_message, max_length, top_p,\n                              temperature, allow_generate, history, last_state,\n                              *args, **kwargs):\n        last_state[0] = copy.deepcopy(history)\n        last_state[1] = query\n        last_state[2] = latest_message\n        if history is None:\n            history = []\n        allow_generate[0] = True\n        history.append({\"role\": \"user\", \"content\": query})\n        history.append({\"role\": \"assistant\", \"content\": latest_message})\n        for response in self.stream_chat_continue(\n                self.model,\n                self.tokenizer,\n                query=query,\n                history=history,\n                max_length=max_length,\n                top_p=top_p,\n                temperature=temperature):\n            history[-1][\"content\"] = response\n            history_colorful = copy.deepcopy(history)\n            colorful_response = f'<span style=\"color:red\">{latest_message}</span>{response[len(latest_message):]}'\n            history_colorful[-1][\"content\"] = colorful_response\n            history_tuple = []\n            for i in range(0, 
len(history_colorful), 2):\n                history_tuple.append((history_colorful[i][\"content\"],\n                                      history_colorful[i + 1][\"content\"]))\n            yield history_tuple, '', ''\n            if not allow_generate[0]:\n                break\n"
  },
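  {
    "path": "examples/parse_codeblock_demo.py",
    "content": "\"\"\"Tiny demonstration of predictors.base.parse_codeblock (the input string is made up): an opening fence becomes <pre><code class=...>, the closing fence becomes </code></pre>, and every other line after the first is prefixed with <br/> with < and > escaped.\"\"\"\nfrom predictors.base import parse_codeblock\n\ntext = \"Here is code:\\n```python\\nprint(1 < 2)\\n```\"\nprint(parse_codeblock(text))\n# -> Here is code:<pre><code class=\"python\"><br/>print(1 &lt; 2)</code></pre>\n"
  },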
  {
    "path": "predictors/chatglm2_predictor.py",
    "content": "import time\nfrom typing import List, Tuple\n\nimport torch\nfrom transformers import AutoModel, AutoTokenizer\nfrom transformers import LogitsProcessor, LogitsProcessorList\n\nfrom predictors.base import BasePredictor, parse_codeblock\n\n\nclass InvalidScoreLogitsProcessor(LogitsProcessor):\n    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:\n        if torch.isnan(scores).any() or torch.isinf(scores).any():\n            scores.zero_()\n            scores[..., 5] = 5e4\n        return scores\n\n\nclass ChatGLM2(BasePredictor):\n\n    def __init__(self, model_name):\n        self.predict_mode = 'tuple'\n        print(f'Loading model {model_name}')\n        start = time.perf_counter()\n        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n        self.tokenizer = AutoTokenizer.from_pretrained(\n            model_name, trust_remote_code=True, resume_download=True)\n        if 'slim' in model_name:\n            model = AutoModel.from_pretrained(\n                model_name, trust_remote_code=True,\n                resume_download=True)\n            if self.device == 'cuda':\n                model = model.half().to(self.device)\n            else:\n                model = model.float()\n        elif 'int4' in model_name:\n            model = AutoModel.from_pretrained(\n                model_name, trust_remote_code=True,\n                resume_download=True)\n            if self.device == 'cuda':\n                model = model.half().to(self.device)\n            else:\n                model = model.float()\n        else:\n            model = AutoModel.from_pretrained(\n                model_name,\n                trust_remote_code=True,\n                resume_download=True,\n                low_cpu_mem_usage=True,\n                torch_dtype=torch.float16\n                if self.device == 'cuda' else torch.float32,\n                device_map={'': self.device})\n            if self.device == 'cpu':\n                model = model.float()\n        model = model.eval()\n        self.model = model\n        self.model_name = model_name\n        end = time.perf_counter()\n        print(\n            f'Successfully loaded model {model_name}, time cost: {end - start:.2f}s'\n        )\n\n    @torch.no_grad()\n    def stream_chat_continue(self,\n                             model,\n                             tokenizer, query: str, history: List[Tuple[str, str]] = None, past_key_values=None,\n                             max_length: int = 8192, do_sample=True, top_p=0.8, temperature=0.8, logits_processor=None,\n                             return_past_key_values=False, **kwargs):\n        if history is None:\n            history = []\n        if logits_processor is None:\n            logits_processor = LogitsProcessorList()\n        if len(history) > 0:\n            answer = history[-1][1]\n        else:\n            answer = ''\n        logits_processor.append(\n            InvalidScoreLogitsProcessor())\n        gen_kwargs = {\n            \"max_length\": max_length,\n            \"do_sample\": do_sample,\n            \"top_p\": top_p,\n            \"temperature\": temperature,\n            \"logits_processor\": logits_processor,\n            **kwargs\n        }\n\n        if not history:\n            prompt = query\n        else:\n            prompt = \"\"\n            for i, (old_query, response) in enumerate(history):\n                if i != len(history) - 1:\n                    prompt += \"[Round 
{}]\\n\\n问：{}\\n\\n答：{}\\n\\n\".format(\n                        i, old_query, response)\n                else:\n                    prompt += \"[Round {}]\\n\\n问：{}\\n\\n答：\\n\\n\".format(i, old_query)\n        batch_input = tokenizer([prompt], return_tensors=\"pt\")\n        batch_input = batch_input.to(model.device)\n\n        batch_answer = tokenizer(answer, return_tensors=\"pt\")\n        batch_answer = batch_answer.to(model.device)\n\n        final_input_ids = torch.cat(\n            [batch_input['input_ids'], batch_answer['input_ids'][:, 3:]],\n            dim=-1)\n        final_input_ids = final_input_ids.to(model.device)\n\n        final_input = {}\n        final_input['input_ids'] = final_input_ids\n        final_input['position_ids'] = model.get_position_ids(final_input_ids, device=final_input_ids.device)\n        final_input['attention_mask'] = torch.ones(final_input_ids.shape, dtype=torch.long, device=final_input_ids.device)\n\n        for outputs in model.stream_generate(**final_input, past_key_values=past_key_values,\n                                             return_past_key_values=return_past_key_values, **gen_kwargs):\n            if return_past_key_values:\n                outputs, past_key_values = outputs\n            outputs = outputs.tolist()[0][len(batch_input[\"input_ids\"][0]):]\n            response = tokenizer.decode(outputs)\n            if response and response[-1] != \"�\":\n                response = model.process_response(response)\n                yield parse_codeblock(response)\n\n\ndef test():\n    model_name = 'chatglm2-6b'\n\n    predictor = ChatGLM2(model_name)\n    top_p = 0.01\n    max_length = 128\n    temperature = 0.01\n\n    history = []\n    line = '你是谁？'\n    last_message = '我是张三丰，我是武当派'\n    print(line)\n    for x in predictor.predict_continue(\n            query=line, latest_message=last_message,\n            max_length=max_length, top_p=top_p, temperature=temperature,\n            allow_generate=[True], history=history, last_state=[[], None, None]):\n        print(x[0][-1][1])\n\n\ndef test2():\n    from chatglm2.modeling_chatglm import ChatGLMForConditionalGeneration\n    model_name = 'chatglm2-6b'\n    device = 'cuda'\n    tokenizer = AutoTokenizer.from_pretrained(\n        model_name, trust_remote_code=True, resume_download=True)\n    model = ChatGLMForConditionalGeneration.from_pretrained(\n        model_name,\n        trust_remote_code=True,\n        resume_download=True,\n        low_cpu_mem_usage=True,\n        torch_dtype=torch.float16 if device == 'cuda' else torch.float32,\n        device_map={'': device})\n    model = model.eval()\n\n    query = '继续'\n    history = [('你是谁？', '我是张三丰，')]\n    max_length = 128\n    top_p = 0.95\n    temperature = 0.8\n\n    for response, new_history in model.stream_chat(\n            tokenizer=tokenizer,\n            query=query,\n            history=history,\n            max_length=max_length,\n            top_p=top_p,\n            temperature=temperature):\n        print(response, new_history)\n\n\nif __name__ == '__main__':\n    test()\n"
  },
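  {
    "path": "examples/continuation_sketch.py",
    "content": "\"\"\"Schematic (helper name and defaults are illustrative, not package API) of the continuation trick the predictors share: tokenize the prompt, tokenize the partial answer, drop the answer encoding's special prefix tokens, and concatenate so stream_generate resumes mid-answer. The prefix length is 3 for ChatGLM2 above and 2 for ChatGLM3/GLM4; `tokenizer` is any HF tokenizer returning input_ids.\"\"\"\nimport torch\n\ndef build_continuation_input(tokenizer, prompt, partial_answer, skip=3):\n    prompt_ids = tokenizer([prompt], return_tensors='pt')['input_ids']\n    answer_ids = tokenizer([partial_answer], return_tensors='pt')['input_ids']\n    # Everything after the tokenizer's special prefix belongs to the answer.\n    return torch.cat([prompt_ids, answer_ids[:, skip:]], dim=-1)\n"
  },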
  {
    "path": "predictors/chatglm3_predictor.py",
    "content": "import time\nimport json\nfrom typing import List, Dict\n\nimport torch\nfrom transformers import AutoModel, AutoTokenizer\nfrom transformers import LogitsProcessor, LogitsProcessorList\n\nfrom predictors.base import BasePredictor, parse_codeblock\n\n\nclass InvalidScoreLogitsProcessor(LogitsProcessor):\n\n    def __call__(self, input_ids: torch.LongTensor,\n                 scores: torch.FloatTensor) -> torch.FloatTensor:\n        if torch.isnan(scores).any() or torch.isinf(scores).any():\n            scores.zero_()\n            scores[..., 5] = 5e4\n        return scores\n\n\nclass ChatGLM3(BasePredictor):\n\n    def __init__(self, model_name):\n        self.predict_mode = 'dict'\n        print(f'Loading model {model_name}')\n        start = time.perf_counter()\n        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n        self.tokenizer = AutoTokenizer.from_pretrained(\n            model_name, trust_remote_code=True, resume_download=True)\n        if 'slim' in model_name:\n            model = AutoModel.from_pretrained(\n                model_name, trust_remote_code=True, resume_download=True)\n            if self.device == 'cuda':\n                model = model.half().to(self.device)\n            else:\n                model = model.float()\n        elif 'int4' in model_name:\n            model = AutoModel.from_pretrained(\n                model_name, trust_remote_code=True, resume_download=True)\n            if self.device == 'cuda':\n                model = model.half().to(self.device)\n            else:\n                model = model.float()\n        else:\n            model = AutoModel.from_pretrained(\n                model_name,\n                trust_remote_code=True,\n                resume_download=True,\n                low_cpu_mem_usage=True,\n                torch_dtype=torch.float16\n                if self.device == 'cuda' else torch.float32,\n                device_map={'': self.device})\n            if self.device == 'cpu':\n                model = model.float()\n        model = model.eval()\n        self.model = model\n        self.model_name = model_name\n        end = time.perf_counter()\n        print(\n            f'Successfully loaded model {model_name}, time cost: {end - start:.2f}s'\n        )\n\n    @torch.inference_mode()\n    def stream_chat_continue(self,\n                             model,\n                             tokenizer,\n                             query: str,\n                             history: List[Dict] = None,\n                             role: str = \"user\",\n                             past_key_values=None,\n                             max_length: int = 8192,\n                             do_sample=True,\n                             top_p=0.8,\n                             temperature=0.8,\n                             logits_processor=None,\n                             return_past_key_values=False,\n                             **kwargs):\n        if history is None:\n            history = []\n        if logits_processor is None:\n            logits_processor = LogitsProcessorList()\n        logits_processor.append(InvalidScoreLogitsProcessor())\n\n        eos_token_id = [\n            tokenizer.eos_token_id,\n            tokenizer.get_command(\"<|user|>\"),\n            tokenizer.get_command(\"<|observation|>\")\n        ]\n\n        gen_kwargs = {\n            \"max_length\": max_length,\n            \"do_sample\": do_sample,\n            \"top_p\": top_p,\n            \"temperature\": 
temperature,\n            \"logits_processor\": logits_processor,\n            **kwargs\n        }\n\n        answer = history[-1][\"content\"]\n\n        input_ids = []\n        for item in history[:-1]:\n            content = item[\"content\"]\n            if item[\"role\"] == \"system\" and \"tools\" in item:\n                content = content + \"\\n\" + json.dumps(item[\"tools\"], indent=4, ensure_ascii=False)\n            input_ids.extend(tokenizer.build_single_message(item[\"role\"], item.get(\"metadata\", \"\"), content))\n        batch_input = tokenizer.batch_encode_plus([input_ids], return_tensors=\"pt\", is_split_into_words=True)\n        batch_input = batch_input.to(model.device)\n\n        answer_input_ids = tokenizer.build_single_message(\"assistant\", \"\", answer)\n        batch_answer = tokenizer.batch_encode_plus([answer_input_ids], return_tensors=\"pt\", is_split_into_words=True)\n        batch_answer = batch_answer.to(model.device)\n\n        final_input_ids = torch.cat([batch_input['input_ids'], batch_answer['input_ids'][:, 2:]], dim=-1)\n        final_input_ids = final_input_ids.to(model.device)\n\n        final_input = {}\n        final_input['input_ids'] = final_input_ids\n        final_input['position_ids'] = model.get_position_ids(final_input_ids, device=final_input_ids.device)\n        final_input['attention_mask'] = torch.ones(final_input_ids.shape, dtype=torch.long, device=final_input_ids.device)\n\n        for outputs in model.stream_generate(\n                **final_input,\n                past_key_values=past_key_values,\n                eos_token_id=eos_token_id,\n                return_past_key_values=return_past_key_values,\n                **gen_kwargs):\n            if return_past_key_values:\n                outputs, past_key_values = outputs\n            outputs = outputs.tolist()[0][\n                len(batch_input[\"input_ids\"]\n                    [0]):-1]  # Exclude the last token if it's EOS\n            response = tokenizer.decode(outputs)\n            if response and response[-1] != \"�\":\n                response, new_history = model.process_response(\n                    response, history)\n                yield response\n\n\ndef test():\n    model_name = 'THUDM/chatglm3-6b'\n\n    predictor = ChatGLM3(model_name)\n    top_p = 0.01\n    max_length = 128\n    temperature = 0.01\n\n    history = []\n    query = '你是谁？'\n    last_message = '我是张三丰，我是武当派'\n\n    print(query)\n    for x in predictor.predict_continue_dict(\n            query=query,\n            latest_message=last_message,\n            max_length=max_length,\n            top_p=top_p,\n            temperature=temperature,\n            allow_generate=[True],\n            history=history,\n            last_state=[[], None, None]):\n        print(x[0][-1])\n\n\ndef test2():\n    from chatglm3.modeling_chatglm import ChatGLMForConditionalGeneration\n    model_name = 'THUDM/chatglm3-6b'\n    device = 'cuda'\n    tokenizer = AutoTokenizer.from_pretrained(\n        model_name, trust_remote_code=True, resume_download=True)\n    model = ChatGLMForConditionalGeneration.from_pretrained(\n        model_name,\n        trust_remote_code=True,\n        resume_download=True,\n        low_cpu_mem_usage=True,\n        torch_dtype=torch.float16 if device == 'cuda' else torch.float32,\n        device_map={'': device})\n    model = model.eval()\n\n    query = '继续'\n    history = [{\n        'role': 'user',\n        'content': '你是谁？'\n    }, {\n        'role': 'assistant',\n        'content': 
'我是张三丰，'\n    }]\n    max_length = 128\n    top_p = 0.95\n    temperature = 0.8\n\n    for response, new_history in model.stream_chat(\n            tokenizer=tokenizer,\n            query=query,\n            history=history,\n            max_length=max_length,\n            top_p=top_p,\n            temperature=temperature):\n        print(response, new_history)\n\n\nif __name__ == '__main__':\n    test()\n"
  },
  {
    "path": "predictors/chatglm_predictor.py",
    "content": "import time\nfrom typing import List, Tuple\n\nimport torch\nfrom transformers import AutoModel, AutoTokenizer\nfrom transformers import LogitsProcessor, LogitsProcessorList\n\nfrom predictors.base import BasePredictor, parse_codeblock\n\n\nclass InvalidScoreLogitsProcessor(LogitsProcessor):\n\n    def __init__(self, start_pos=5):\n        self.start_pos = start_pos\n\n    def __call__(self, input_ids: torch.LongTensor,\n                 scores: torch.FloatTensor) -> torch.FloatTensor:\n        if torch.isnan(scores).any() or torch.isinf(scores).any():\n            scores.zero_()\n            scores[..., self.start_pos] = 5e4\n        return scores\n\n\nclass ChatGLM(BasePredictor):\n\n    def __init__(self, model_name):\n        self.predict_mode = 'tuple'\n        print(f'Loading model {model_name}')\n        start = time.perf_counter()\n        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n        self.tokenizer = AutoTokenizer.from_pretrained(\n            model_name, trust_remote_code=True, resume_download=True)\n        if 'slim' in model_name:\n            model = AutoModel.from_pretrained(\n                model_name, trust_remote_code=True,\n                resume_download=True)\n            if self.device == 'cuda':\n                model = model.half().to(self.device)\n            else:\n                model = model.float()\n        elif 'int4' in model_name:\n            model = AutoModel.from_pretrained(\n                model_name, trust_remote_code=True,\n                resume_download=True)\n            if self.device == 'cuda':\n                model = model.half().to(self.device)\n            else:\n                model = model.float()\n        else:\n            model = AutoModel.from_pretrained(\n                model_name,\n                trust_remote_code=True,\n                resume_download=True,\n                low_cpu_mem_usage=True,\n                torch_dtype=torch.float16\n                if self.device == 'cuda' else torch.float32,\n                device_map={'': self.device})\n            if self.device == 'cpu':\n                model = model.float()\n        model = model.eval()\n        self.model = model\n        self.model_name = model_name\n        end = time.perf_counter()\n        print(\n            f'Successfully loaded model {model_name}, time cost: {end - start:.2f}s'\n        )\n\n    @torch.no_grad()\n    def stream_chat_continue(self,\n                             model,\n                             tokenizer,\n                             query: str,\n                             history: List[Tuple[str, str]] = None,\n                             max_length: int = 2048,\n                             do_sample=True,\n                             top_p=0.7,\n                             temperature=0.95,\n                             logits_processor=None,\n                             **kwargs):\n        if history is None:\n            history = []\n        if logits_processor is None:\n            logits_processor = LogitsProcessorList()\n        if len(history) > 0:\n            answer = history[-1][1]\n        else:\n            answer = ''\n        logits_processor.append(\n            InvalidScoreLogitsProcessor(5))\n        gen_kwargs = {\n            \"max_length\": max_length,\n            \"do_sample\": do_sample,\n            \"top_p\": top_p,\n            \"temperature\": temperature,\n            \"logits_processor\": logits_processor,\n            **kwargs\n        }\n        if not 
history:\n            prompt = query\n        else:\n            prompt = \"\"\n            for i, (old_query, response) in enumerate(history):\n                if i != len(history) - 1:\n                    prompt += \"[Round {}]\\n问：{}\\n答：{}\\n\".format(\n                        i, old_query, response)\n                else:\n                    prompt += \"[Round {}]\\n问：{}\\n答：\".format(i, old_query)\n        batch_input = tokenizer([prompt], return_tensors=\"pt\", padding=True)\n        batch_input = batch_input.to(model.device)\n\n        batch_answer = tokenizer(answer, return_tensors=\"pt\")\n        batch_answer = batch_answer.to(model.device)\n\n        input_length = len(batch_input['input_ids'][0])\n        final_input_ids = torch.cat(\n            [batch_input['input_ids'], batch_answer['input_ids'][:, :-2]],\n            dim=-1)\n        final_input_ids = final_input_ids.to(model.device)\n\n        attention_mask = model.get_masks(\n            final_input_ids, device=final_input_ids.device)\n\n        batch_input['input_ids'] = final_input_ids\n        batch_input['attention_mask'] = attention_mask\n\n        input_ids = final_input_ids\n        MASK, gMASK = self.model.config.bos_token_id - 4, self.model.config.bos_token_id - 3\n        mask_token = MASK if MASK in input_ids else gMASK\n        mask_positions = [seq.tolist().index(mask_token) for seq in input_ids]\n        batch_input['position_ids'] = self.model.get_position_ids(\n            input_ids, mask_positions, device=input_ids.device)\n\n        for outputs in model.stream_generate(**batch_input, **gen_kwargs):\n            outputs = outputs.tolist()[0][input_length:]\n            response = tokenizer.decode(outputs)\n            response = model.process_response(response)\n            yield parse_codeblock(response)\n\n\ndef test():\n    model_name = 'chatglm-6b'\n    # model_name = 'silver/chatglm-6b-int4-slim'\n\n    predictor = ChatGLM(model_name)\n    top_p = 0.95\n    max_length = 128\n    temperature = 0.8\n\n    line = '你是谁？'\n    last_message = '我是张三丰，'\n    print(line)\n    for x in predictor.predict_continue(\n            query=line, latest_message=last_message,\n            max_length=max_length, top_p=top_p, temperature=temperature,\n            allow_generate=[True], history=None, last_state=[[], None, None]):\n        print(x[0][-1][1])\n\n\nif __name__ == '__main__':\n    test()\n"
  },
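  {
    "path": "examples/chatglm_prompt_format.py",
    "content": "\"\"\"Standalone sketch (history values are made up) of the [Round i] prompt format ChatGLM.stream_chat_continue builds for the v1 model: completed rounds carry their answers, and the last round ends with an open answer slot.\"\"\"\nhistory = [('你是谁？', '我是张三丰。'), ('继续', '')]\n\nprompt = ''\nfor i, (old_query, response) in enumerate(history):\n    if i != len(history) - 1:\n        prompt += '[Round {}]\\n问：{}\\n答：{}\\n'.format(i, old_query, response)\n    else:\n        prompt += '[Round {}]\\n问：{}\\n答：'.format(i, old_query)\nprint(prompt)\n"
  },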
  {
    "path": "predictors/debug.py",
    "content": "class Debug:\n    def __init__(self, *args, **kwargs):\n        pass\n\n    def inference(self, *args, **kwargs):\n        import random\n        sample_outputs = [\n            '我是杨开心。',\n            '我两岁半了。',\n            '我喜欢吃雪糕。',\n        ]\n        one_output = random.choice(sample_outputs)\n        for i in range(len(one_output)):\n            yield one_output[:i + 1]\n\n    def predict_continue(self, *args, **kwargs):\n        yield from self.inference(*args, **kwargs)\n"
  },
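  {
    "path": "examples/debug_predictor_demo.py",
    "content": "\"\"\"Usage sketch for the Debug stand-in predictor: predict_continue streams growing prefixes of a canned reply, so the chat UI can be exercised without loading a real model.\"\"\"\nfrom predictors.debug import Debug\n\nfor partial in Debug().predict_continue():\n    print(partial)\n"
  },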
  {
    "path": "predictors/glm4_predictor.py",
    "content": "import time\nimport json\nfrom typing import List, Dict\n\nimport torch\nfrom transformers import AutoModel, AutoTokenizer\nfrom transformers import LogitsProcessor, LogitsProcessorList\nfrom transformers import BitsAndBytesConfig\n\nfrom predictors.base import BasePredictor, parse_codeblock\n\n\nclass InvalidScoreLogitsProcessor(LogitsProcessor):\n\n    def __call__(self, input_ids: torch.LongTensor,\n                 scores: torch.FloatTensor) -> torch.FloatTensor:\n        if torch.isnan(scores).any() or torch.isinf(scores).any():\n            scores.zero_()\n            scores[..., 5] = 5e4\n        return scores\n\n\nclass GLM4(BasePredictor):\n\n    def __init__(self, model_name, int4=False):\n        self.predict_mode = 'dict'\n        print(f'Loading model {model_name}')\n        start = time.perf_counter()\n        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n        self.tokenizer = AutoTokenizer.from_pretrained(\n            model_name, trust_remote_code=True)\n        if 'slim' in model_name:\n            model = AutoModel.from_pretrained(\n                model_name, trust_remote_code=True)\n            if self.device == 'cuda':\n                model = model.half().to(self.device)\n            else:\n                model = model.float()\n        elif 'int4' in model_name:\n            model = AutoModel.from_pretrained(\n                model_name, trust_remote_code=True)\n            if self.device == 'cuda':\n                model = model.half().to(self.device)\n            else:\n                model = model.float()\n        else:\n            model = AutoModel.from_pretrained(\n                model_name,\n                trust_remote_code=True,\n                low_cpu_mem_usage=True,\n                torch_dtype=torch.float16\n                if self.device == 'cuda' else torch.float32,\n                quantization_config=BitsAndBytesConfig(\n                    load_in_4bit=True) if int4 else None,\n                device_map={'': self.device})\n            if self.device == 'cpu':\n                model = model.float()\n        model = model.eval()\n        self.model = model\n        self.model_name = model_name\n        end = time.perf_counter()\n        print(\n            f'Successfully loaded model {model_name}, time cost: {end - start:.2f}s'\n        )\n\n    @torch.inference_mode()\n    def stream_chat_continue(self,\n                             model,\n                             tokenizer,\n                             query: str,\n                             history: List[Dict] = None,\n                             role: str = \"user\",\n                             past_key_values=None,\n                             max_length: int = 8192,\n                             do_sample=True,\n                             top_p=0.8,\n                             temperature=0.8,\n                             logits_processor=None,\n                             return_past_key_values=False,\n                             **kwargs):\n        if history is None:\n            history = []\n        if logits_processor is None:\n            logits_processor = LogitsProcessorList()\n        logits_processor.append(InvalidScoreLogitsProcessor())\n\n        eos_token_id = [\n            tokenizer.eos_token_id,\n            tokenizer.convert_tokens_to_ids(\"<|user|>\"),\n            tokenizer.convert_tokens_to_ids(\"<|observation|>\")\n        ]\n\n        gen_kwargs = {\n            \"max_length\": max_length,\n            \"do_sample\": 
do_sample,\n            \"top_p\": top_p,\n            \"temperature\": temperature,\n            \"logits_processor\": logits_processor,\n            **kwargs\n        }\n\n        answer = history[-1][\"content\"]\n\n        input_ids = []\n        for item in history[:-1]:\n            content = item[\"content\"]\n            if item[\"role\"] == \"system\" and \"tools\" in item:\n                content = content + \"\\n\" + json.dumps(\n                    item[\"tools\"], indent=4, ensure_ascii=False)\n            input_ids.extend(\n                tokenizer.build_single_message(item[\"role\"],\n                                               item.get(\"metadata\", \"\"),\n                                               content))\n        batch_input = tokenizer.batch_encode_plus([input_ids],\n                                                  return_tensors=\"pt\",\n                                                  is_split_into_words=True)\n        batch_input = batch_input.to(model.device)\n\n        answer_input_ids = tokenizer.build_single_message(\n            \"assistant\", \"\", answer)\n        batch_answer = tokenizer.batch_encode_plus([answer_input_ids],\n                                                   return_tensors=\"pt\",\n                                                   is_split_into_words=True)\n        batch_answer = batch_answer.to(model.device)\n\n        final_input_ids = torch.cat(\n            [batch_input['input_ids'], batch_answer['input_ids'][:, 2:]],\n            dim=-1)\n        final_input_ids = final_input_ids.to(model.device)\n\n        final_input = {}\n        final_input['input_ids'] = final_input_ids\n        final_input['position_ids'] = model.get_position_ids(\n            final_input_ids, device=final_input_ids.device)\n        final_input['attention_mask'] = torch.ones(\n            final_input_ids.shape,\n            dtype=torch.long,\n            device=final_input_ids.device)\n\n        for outputs in model.stream_generate(\n                **final_input,\n                past_key_values=past_key_values,\n                eos_token_id=eos_token_id,\n                return_past_key_values=return_past_key_values,\n                **gen_kwargs):\n            if return_past_key_values:\n                outputs, past_key_values = outputs\n            outputs = outputs.tolist()[0][\n                len(batch_input[\"input_ids\"]\n                    [0]):-1]  # Exclude the last token if it's EOS\n            response = tokenizer.decode(outputs)\n            if response and response[-1] != \"�\":\n                response, new_history = model.process_response(\n                    response, history)\n                yield response\n\n\ndef test():\n    model_name = 'THUDM/glm-4-9b-chat-1m'\n\n    predictor = GLM4(model_name)\n    top_p = 0.01\n    max_length = 128\n    temperature = 0.01\n\n    history = []\n    query = '你是谁？'\n    last_message = '我是张三丰，我是武当派'\n\n    print(query)\n    for x in predictor.predict_continue_dict(\n            query=query,\n            latest_message=last_message,\n            max_length=max_length,\n            top_p=top_p,\n            temperature=temperature,\n            allow_generate=[True],\n            history=history,\n            last_state=[[], None, None]):\n        print(x[0][-1])\n\n\ndef test2():\n    from glm4.modeling_chatglm import ChatGLMForConditionalGeneration\n    model_name = 'THUDM/glm-4-9b-chat-1m'\n    device = 'cuda'\n    tokenizer = AutoTokenizer.from_pretrained(\n        model_name, 
trust_remote_code=True)\n    model = ChatGLMForConditionalGeneration.from_pretrained(\n        model_name,\n        trust_remote_code=True,\n        low_cpu_mem_usage=True,\n        torch_dtype=torch.bfloat16 if device == 'cuda' else torch.float32,\n        device_map={'': device})\n    model = model.eval()\n\n    query = '继续'\n    history = [{\n        'role': 'user',\n        'content': '你是谁？'\n    }, {\n        'role': 'assistant',\n        'content': '我是张三丰，我是武当派'\n    }]\n    max_length = 128\n    top_p = 0.95\n    temperature = 0.8\n\n    for response, new_history in model.stream_chat(\n            tokenizer=tokenizer,\n            query=query,\n            history=history,\n            max_length=max_length,\n            top_p=top_p,\n            temperature=temperature):\n        print(response, new_history)\n\n\nif __name__ == '__main__':\n    test()\n"
  },
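  {
    "path": "examples/glm4_continue_sketch.py",
    "content": "\"\"\"Minimal usage sketch for GLM4.stream_chat_continue (illustrative file, not\npart of the original project; model name and flow follow the test in\npredictors/glm4_predictor.py). It seeds the chat history with a partial\nassistant reply and streams the continuation, which is the core trick the\npredictors implement.\"\"\"\nfrom predictors.glm4_predictor import GLM4\n\nif __name__ == '__main__':\n    predictor = GLM4('THUDM/glm-4-9b-chat-1m')\n    # The last history entry is the partial answer the model should extend;\n    # stream_chat_continue reads it from the history (query is unused).\n    history = [\n        {'role': 'user', 'content': '你是谁？'},\n        {'role': 'assistant', 'content': '我是张三丰，我是武当派'},\n    ]\n    for response in predictor.stream_chat_continue(\n            predictor.model,\n            predictor.tokenizer,\n            query='',\n            history=history,\n            max_length=256):\n        print(response)\n"
  },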
  {
    "path": "predictors/llama.py",
    "content": "import copy\nimport time\nimport warnings\nfrom typing import List, Tuple, Optional, Callable\n\nimport torch\nimport torch.nn as nn\nfrom transformers import LlamaForCausalLM, AutoTokenizer\nfrom transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig\nfrom transformers.utils import logging\n\nfrom predictors.base import BasePredictor\n\nlogger = logging.get_logger(__name__)\n\n\n@torch.no_grad()\ndef stream_generate(\n        self,\n        input_ids,\n        generation_config: Optional[GenerationConfig] = None,\n        logits_processor: Optional[LogitsProcessorList] = None,\n        stopping_criteria: Optional[StoppingCriteriaList] = None,\n        prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,\n        **kwargs,\n):\n    batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]\n\n    if generation_config is None:\n        generation_config = self.generation_config\n    generation_config = copy.deepcopy(generation_config)\n    model_kwargs = generation_config.update(**kwargs)\n    bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id\n\n    if isinstance(eos_token_id, int):\n        eos_token_id = [eos_token_id]\n\n    has_default_max_length = kwargs.get(\"max_length\") is None and generation_config.max_length is not None\n    if has_default_max_length and generation_config.max_new_tokens is None:\n        warnings.warn(\n            f\"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. \"\n            \"This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we\"\n            \" recommend using `max_new_tokens` to control the maximum length of the generation.\",\n            UserWarning,\n        )\n    elif generation_config.max_new_tokens is not None:\n        generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length\n        if not has_default_max_length:\n            logger.warn(\n                f\"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=\"\n                f\"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. \"\n                \"Please refer to the documentation for more information. \"\n                \"(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)\",\n                UserWarning,\n            )\n\n    if input_ids_seq_length >= generation_config.max_length:\n        input_ids_string = \"decoder_input_ids\" if self.config.is_encoder_decoder else \"input_ids\"\n        logger.warning(\n            f\"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to\"\n            f\" {generation_config.max_length}. This can lead to unexpected behavior. You should consider\"\n            \" increasing `max_new_tokens`.\"\n        )\n\n    # 2. 
Set generation parameters if not already defined\n    logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()\n    stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()\n\n    logits_processor = self._get_logits_processor(\n        generation_config=generation_config,\n        input_ids_seq_length=input_ids_seq_length,\n        encoder_input_ids=input_ids,\n        prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,\n        logits_processor=logits_processor,\n    )\n\n    stopping_criteria = self._get_stopping_criteria(\n        generation_config=generation_config, stopping_criteria=stopping_criteria\n    )\n    logits_warper = self._get_logits_warper(generation_config)\n\n    unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)\n    scores = None\n    while True:\n        model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)\n        # forward pass to get next token\n        outputs = self(\n            **model_inputs,\n            return_dict=True,\n            output_attentions=False,\n            output_hidden_states=False,\n        )\n\n        next_token_logits = outputs.logits[:, -1, :]\n\n        # pre-process distribution\n        next_token_scores = logits_processor(input_ids, next_token_logits)\n        next_token_scores = logits_warper(input_ids, next_token_scores)\n\n        # sample\n        probs = nn.functional.softmax(next_token_scores, dim=-1)\n        if generation_config.do_sample:\n            next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)\n        else:\n            next_tokens = torch.argmax(probs, dim=-1)\n\n        # update generated ids, model inputs, and length for next step\n        input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)\n        model_kwargs = self._update_model_kwargs_for_generation(\n            outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder\n        )\n        unfinished_sequences = unfinished_sequences.mul((sum(next_tokens != i for i in eos_token_id)).long())\n\n        # stop when each sentence is finished, or if we exceed the maximum length\n        if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):\n            break\n        yield input_ids\n\n\nclass LLaMa(BasePredictor):\n\n    def __init__(self, model_name):\n        self.predict_mode = 'tuple'\n        print(f'Loading model {model_name}')\n        start = time.perf_counter()\n        self.model_name = model_name\n        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n        self.tokenizer = AutoTokenizer.from_pretrained(\n            model_name, resume_download=True)\n        self.model = LlamaForCausalLM.from_pretrained(\n            model_name,\n            low_cpu_mem_usage=True,\n            resume_download=True,\n            torch_dtype=torch.float16 if self.device == 'cuda' else torch.float32,\n            device_map={'': self.device})\n        self.model.eval()\n        end = time.perf_counter()\n        print(f'Successfully loaded model {model_name}, time cost: {end - start:.2f}s')\n\n    @torch.no_grad()\n    def stream_chat_continue(self,\n                             model,\n                             tokenizer,\n                             query: str,\n                             history: List[Tuple[str, str]] = None,\n                             max_length=500,\n                             do_sample=True,\n                             top_p=0.85,\n       
                      temperature=0.5,\n                             **kwargs):\n        if history is None:\n            history = []\n        if len(history) > 0:\n            answer = history[-1][1]\n        else:\n            answer = ''\n        gen_kwargs = {\n            \"max_length\": max_length,\n            \"do_sample\": do_sample,\n            \"top_p\": top_p,\n            \"temperature\": temperature,\n            **kwargs\n        }\n        if not history:\n            prompt = f'Human: {query} \\n\\nAssistant:'\n        else:\n            prompt = \"\"\n            for i, (old_query, response) in enumerate(history):\n                if i != len(history) - 1:\n                    prompt += f'Human: {old_query} \\n\\nAssistant:{response} \\n\\n'\n                else:\n                    prompt += f'Human: {old_query} \\n\\nAssistant:'\n        batch_input = tokenizer([prompt], return_tensors=\"pt\")\n        batch_input = batch_input.to(model.device)\n\n        batch_answer = tokenizer(answer, return_tensors=\"pt\")\n        batch_answer = batch_answer.to(model.device)\n\n        input_length = len(batch_input['input_ids'][0])\n        # Skip the BOS token the tokenizer prepends to the answer, so the\n        # partial reply is appended verbatim after the prompt.\n        final_input_ids = torch.cat(\n            [batch_input['input_ids'], batch_answer['input_ids'][:, 1:]],\n            dim=-1)\n        final_input_ids = final_input_ids.to(model.device)\n        # Attend to the whole sequence, including the answer prefix;\n        # otherwise the model would continue while ignoring it.\n        attention_mask = torch.ones_like(final_input_ids).bool().to(\n            model.device)\n\n        batch_input['input_ids'] = final_input_ids\n        batch_input['attention_mask'] = attention_mask\n\n        for outputs in stream_generate(model, **batch_input, **gen_kwargs):\n            outputs = outputs.tolist()[0][input_length:]\n            response = tokenizer.decode(outputs)\n            yield response\n\n\ndef test():\n    model_name = 'BelleGroup/BELLE-LLAMA-7B-2M'\n\n    predictor = LLaMa(model_name)\n    device = predictor.device\n    tokenizer = predictor.tokenizer\n    model = predictor.model\n    min_length = 10\n    max_length = 2048\n    top_p = 0.95\n    temperature = 0.8\n\n    print(\"Human:\")\n    line = input()\n    inputs = 'Human: ' + line.strip() + '\\n\\nAssistant:'\n    input_ids = tokenizer.encode(inputs, return_tensors=\"pt\").to(device)\n\n    with torch.no_grad():\n        generated_ids = model.generate(\n            input_ids,\n            do_sample=True,\n            min_length=min_length,\n            max_length=max_length,\n            top_p=top_p,\n            temperature=temperature,\n        )\n    print(\"Assistant:\\n【\")\n    print(tokenizer.decode([el.item() for el in generated_ids[0]]))\n    print(\"】\\n-------------------------------\\n\")\n\n    for x in predictor.predict_continue(\n            line, '', max_length, top_p, temperature, [True], None):\n        print(\"Assistant:\\n【\")\n        print(x[0][-1][1])\n        print(\"】\\n-------------------------------\\n\")\n\n\nif __name__ == '__main__':\n    test()\n"
  },
  {
    "path": "predictors/llama_gptq.py",
    "content": "import time\nimport torch\nimport transformers\nfrom predictors.llama import LLaMa\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom transformers import AutoTokenizer, LlamaForCausalLM\nfrom gptq.llama_inference import load_quant\nfrom transformers.utils.hub import cached_file\n\n\nclass LLaMaGPTQ(LLaMa):\n    def __init__(self, model_name, checkpoint_path='llama7b-2m-4bit-128g.pt', wbits=4, groupsize=128):\n        self.predict_mode = 'tuple'\n        print(f'Loading model {model_name}')\n        start = time.perf_counter()\n        self.model_name = model_name\n        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n        self.tokenizer = AutoTokenizer.from_pretrained(\n            model_name, resume_download=True)\n        checkpoint_path = cached_file(model_name, checkpoint_path)\n        print(f'Loading model from {checkpoint_path} ...')\n        model: LlamaForCausalLM = load_quant(model_name, checkpoint_path, wbits, groupsize)\n        model.eval()\n        model.to(self.device)\n        self.model = model\n        end = time.perf_counter()\n        print(f'Successfully loaded model {model_name}, time cost: {end - start:.2f}s')\n\n\ndef test():\n    model_name = 'BelleGroup/BELLE-LLAMA-7B-2M-gptq'\n    checkpoint_path = 'llama7b-2m-4bit-128g.pt'\n    wbits = 4\n    groupsize = 128\n\n    predictor = LLaMaGPTQ(model_name, checkpoint_path, wbits, groupsize)\n    device = predictor.device\n    tokenizer = predictor.tokenizer\n    model = predictor.model\n    min_length = 10\n    max_length = 2048\n    top_p = 0.95\n    temperature = 0.8\n\n    print(\"Human:\")\n    line = input()\n    inputs = 'Human: ' + line.strip() + '\\n\\nAssistant:'\n    input_ids = tokenizer.encode(inputs, return_tensors=\"pt\").to(device)\n\n    with torch.no_grad():\n        generated_ids = model.generate(\n            input_ids,\n            do_sample=True,\n            min_length=min_length,\n            max_length=max_length,\n            top_p=top_p,\n            temperature=temperature,\n        )\n    print(\"Assistant:\\n【\")\n    print(tokenizer.decode([el.item() for el in generated_ids[0]]))\n    print(\"】\\n-------------------------------\\n\")\n\n    for x in predictor.predict_continue(\n            line, '', max_length, top_p, temperature, [True], None):\n        print(\"Assistant:\\n【\")\n        print(x[0][-1][1])\n        print(\"】\\n-------------------------------\\n\")\n\n\nif __name__ == '__main__':\n    test()\n"
  },
  {
    "path": "setup_offline.bat",
    "content": "cd /D \"%~dp0\"\n\nrem set http_proxy=http://127.0.0.1:7890 & set https_proxy=http://127.0.0.1:7890\n\necho Setup offline environment\ncall env_offline.bat\n\n:install_pip\nif exist %DIR%\\python\\Scripts\\pip.exe goto :install_python_packages\necho Install pip...\npython %PIP_INSTALLER_LOCATION%\n\n:install_python_packages\necho Install dependencies...\npip install torch==2.3.1 torchvision==0.18.1 --index-url https://download.pytorch.org/whl/cu121 --extra-index-url https://mirrors.bfsu.edu.cn/pypi/web/simple\npip install -r requirements.txt -i https://mirrors.bfsu.edu.cn/pypi/web/simple\n\necho Install finished.\npause\n"
  },
  {
    "path": "setup_venv.bat",
    "content": "cd /D \"%~dp0\"\n\necho Setup venv environment\ncall env_venv.bat\n\necho Install dependencies...\npip install torch==2.0.0+cu118 --index-url https://download.pytorch.org/whl/cu118 --extra-index-url https://mirrors.bfsu.edu.cn/pypi/web/simple\npip install -r requirements.txt -i https://mirrors.bfsu.edu.cn/pypi/web/simple\n\necho Install finished.\npause\n"
  },
  {
    "path": "start.bat",
    "content": "@echo off\n\ncd /D \"%~dp0\"\n\necho Start app.py\npython app.py %*\n\npause\n"
  },
  {
    "path": "start_api.bat",
    "content": "@echo off\n\ncd /D \"%~dp0\"\n\necho Start app_fastapi.py\npython app_fastapi.py %*\n\npause\n"
  },
  {
    "path": "start_offline.bat",
    "content": "@echo off\n\ncd /D \"%~dp0\"\n\ncall env_offline.bat\ncall start.bat\n"
  },
  {
    "path": "start_offline_api.bat",
    "content": "@echo off\n\ncd /D \"%~dp0\"\n\ncall env_offline.bat\ncall start_api.bat\n"
  },
  {
    "path": "start_offline_cmd.bat",
    "content": "@echo off\n\ncd /D \"%~dp0\"\n\ncall env_offline.bat\ncmd\npause\n"
  },
  {
    "path": "start_venv.bat",
    "content": "@echo off\n\ncd /D \"%~dp0\"\n\ncall env_venv.bat\ncall start.bat\n"
  },
  {
    "path": "test_fastapi.py",
    "content": "url = \"http://localhost:8000/stream\"\n\nparams = {\n    \"query\": \"Hello\",\n    'answer_prefix': \"Nice\",\n    \"allow_generate\": [True],\n    'history': [\n        ('你好啊', '你在和我套近乎吗?'), (\"别走啊\", \"我不喜欢不会说英语的人\"),\n        ('我会说英语哦', '那如果你会说的话 我可能会惊呼哦')\n    ]\n}\n\nimport requests\nfrom requests.exceptions import RequestException\n\n\ndef event_source_response_iterator(response):\n    buf = []\n    for chunk in response.iter_content(None):\n        if not chunk:\n            break\n        buf.extend(chunk.split(b\"\\n\"))\n        while buf:\n            line = buf.pop(0).strip()\n            if line:\n                try:\n                    event, data = line.split(b\":\", 1)\n                    if event.startswith(b\"id\"):\n                        continue\n                    if event.strip() == b\"data\":\n                        yield data.strip()\n                except ValueError:\n                    pass\n\n\ntry:\n    response = requests.post(url, json=params, stream=True)\n    response.raise_for_status()\n    for data in event_source_response_iterator(response):\n        print(data.decode())\nexcept RequestException as e:\n    print(e)\n"
  },
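  {
    "path": "examples/mock_stream_server.py",
    "content": "\"\"\"Hypothetical stand-alone mock of the /stream endpoint that test_fastapi.py\ntalks to; the real app_fastapi.py may differ, the endpoint shape is inferred\nfrom the client. Run with: uvicorn examples.mock_stream_server:app --port 8000\"\"\"\nimport asyncio\n\nfrom fastapi import FastAPI, Request\nfrom fastapi.responses import StreamingResponse\n\napp = FastAPI()\n\n\n@app.post('/stream')\nasync def stream(request: Request):\n    params = await request.json()\n    answer_prefix = params.get('answer_prefix', '')\n\n    async def event_stream():\n        # Emit the growing answer as server-sent events, one 'data:' line\n        # per step, matching event_source_response_iterator in test_fastapi.py.\n        text = answer_prefix\n        for word in ['to', 'meet', 'you!']:\n            text += ' ' + word\n            yield f'data: {text}\\n\\n'\n            await asyncio.sleep(0.1)\n\n    return StreamingResponse(event_stream(), media_type='text/event-stream')\n"
  },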
  {
    "path": "test_models.py",
    "content": "import os\nos.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'\n\n\ndef test_model(model_name):\n    if 'glm-4' in model_name.lower():\n        from predictors.glm4_predictor import GLM4\n        predictor = GLM4(model_name)\n    elif 'chatglm3' in model_name.lower():\n        from predictors.chatglm3_predictor import ChatGLM3\n        predictor = ChatGLM3(model_name)\n    elif 'chatglm2' in model_name.lower():\n        from predictors.chatglm2_predictor import ChatGLM2\n        predictor = ChatGLM2(model_name)\n    elif 'chatglm' in model_name.lower():\n        from predictors.chatglm_predictor import ChatGLM\n        predictor = ChatGLM(model_name)\n    elif 'gptq' in model_name.lower():\n        from predictors.llama_gptq import LLaMaGPTQ\n        predictor = LLaMaGPTQ(model_name)\n    elif 'llama' in model_name.lower():\n        from predictors.llama import LLaMa\n        predictor = LLaMa(model_name)\n    elif 'debug' in model_name.lower():\n        from predictors.debug import Debug\n        predictor = Debug(model_name)\n    else:\n        from predictors.chatglm_predictor import ChatGLM\n        predictor = ChatGLM(model_name)\n\n    top_p = 0.01\n    max_length = 128\n    temperature = 0.01\n\n    history = []\n    line = '你是谁？'\n    last_message = '我是张三丰，我是武当派'\n    print(line)\n    for x in predictor.predict_continue(\n            query=line, latest_message=last_message,\n            max_length=max_length, top_p=top_p, temperature=temperature,\n            allow_generate=[True], history=history, last_state=[[], None, None]):\n        print(x[0][-1][1])\n\n\ndef main():\n    model_list = [\n        'THUDM/glm-4-9b-chat-1m',\n    ]\n    for model_name in model_list:\n        print(f'Testing {model_name}')\n        test_model(model_name)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
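  {
    "path": "examples/interrupt_sketch.py",
    "content": "\"\"\"Hedged sketch of interrupting a running generation (illustrative, not part\nof the original project). It assumes, as the list-wrapped flag in the tests\nsuggests, that predict_continue stops once allow_generate[0] turns False; the\nflag is a one-element list so another thread can flip it in place.\"\"\"\nimport threading\nimport time\n\nfrom predictors.debug import Debug\n\n\ndef main():\n    predictor = Debug('debug')\n    allow_generate = [True]\n\n    def stop_after(seconds):\n        time.sleep(seconds)\n        # Shared flag: the generator checks it between tokens (assumption).\n        allow_generate[0] = False\n\n    threading.Thread(target=stop_after, args=(1.0,), daemon=True).start()\n    for x in predictor.predict_continue(\n            query='hello', latest_message='',\n            max_length=128, top_p=0.9, temperature=0.8,\n            allow_generate=allow_generate, history=[],\n            last_state=[[], None, None]):\n        print(x[0][-1][1])\n\n\nif __name__ == '__main__':\n    main()\n"
  },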
  {
    "path": "utils_env.py",
    "content": "def collect_env():\n    import sys\n    from collections import defaultdict\n\n    env_info = {}\n    env_info['sys.platform'] = sys.platform\n    env_info['Python'] = sys.version.replace('\\n', '')\n    env_info['Python executable'] = sys.executable\n\n    import torch\n    env_info['PyTorch'] = torch.__version__\n\n    import gradio\n    env_info['Gradio'] = gradio.__version__\n\n    import transformers\n    env_info['Transformers'] = transformers.__version__\n\n    cuda_available = torch.cuda.is_available()\n    if cuda_available:\n        devices = defaultdict(list)\n        for k in range(torch.cuda.device_count()):\n            devices[torch.cuda.get_device_name(k)].append(str(k))\n        for name, device_ids in devices.items():\n            env_info['GPU ' + ','.join(device_ids)] = name\n    else:\n        env_info['CUDA available'] = False\n\n    return env_info\n\n\nif __name__ == '__main__':\n    for name, val in collect_env().items():\n        print(f'{name}: {val}')\n"
  }
]