[
  {
    "path": ".gitignore",
    "content": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.\n\nsrc/\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n.hypothesis/\n.pytest_cache/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/*/_build/\n\n# PyBuilder\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# pyenv\n.python-version\n\n# celery beat schedule file\ncelerybeat-schedule\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n\n# custom\ndata/\ndata\n.vscode\n.idea\n.DS_Store\n*.pkl\n*.pkl.json\n*.log.json\nwork_dirs/\n\n# Pytorch\n*.pth\n*.py~\n*.sh~\n\n# srun\n*.out\nbatchscript-*\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "README.md",
    "content": "# OREAL: Exploring the Limit of Outcome Reward for Learning Mathematical Reasoning\n\n\n[![license](https://img.shields.io/github/license/InternLM/opencompass.svg)](./LICENSE)\n[![arXiv](https://img.shields.io/badge/arXiv-2502.06781-b31b1b.svg)](https://arxiv.org/abs/2502.06781)\n[![huggingface](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-OREAL-ffc107?color=ffc107&logoColor=white)](https://huggingface.co/collections/internlm/oreal-67aaccf5a8192c1ba3cff018)\n\n\n## ✨ Introduction\n\n![main_fig](./figures/main_fig.jpg)\n\nReasoning abilities, especially those for solving complex math problems, are crucial components of general intelligence.\nRecent advances by proprietary companies, such as o-series models of OpenAI, have made remarkable progress on reasoning tasks. However, the complete technical details remain unrevealed, and the techniques that are believed certainly to be adopted are only reinforcement learning (RL) and the long chain of thoughts.\n\nWe proposes a new RL framework, termed OREAL, to pursue the performance limit that can be achieved through **O**utcome **RE**w**A**rd-based reinforcement **L**earning for mathematical reasoning tasks, where only binary outcome rewards are easily accessible.\n\n+ We theoretically prove that behavior cloning on positive trajectories from best-of-N (BoN) sampling is sufficient to learn the KL-regularized optimal policy in binary feedback environments.\n+ This formulation further implies that the rewards of negative samples should be reshaped to ensure the gradient consistency between positive and negative samples.\n+ To alleviate the long-existing difficulties brought by sparse rewards in RL, which are even exacerbated by the partial correctness of the long chain of thought for reasoning tasks, we further apply a token-level reward model to sample important tokens in reasoning trajectories for learning.\n\nThe OREAL implementation pseudocode is as follows:\n\n![algo](./figures/algo.png)\n\n\n## 📃 Key Results\n\nWith OREAL, for the first time, a 7B model can obtain 94.0 pass@1 accuracy on MATH-500 through RL, being on par with 32B models. OREAL-32B also surpasses previous 32B models trained by distillation with 95.0 pass@1 accuracy on MATH-500.\n\n![main_table](./figures/main_table.png)\n\n## 🤗 HuggingFace\n\n### Model\n\nOur OREAL models are available on Hugging Face 🤗:\n\n| Model    | Huggingface Repo |\n|----------|------------------|\n| OREAL-DeepSeek-R1-Distill-Qwen-7B  | [Model Link](https://huggingface.co/internlm/OREAL-DeepSeek-R1-Distill-Qwen-7B) |\n| OREAL-7B  | [Model Link](https://huggingface.co/internlm/OREAL-7B)  |\n| OREAL-32B  | [Model Link](https://huggingface.co/internlm/OREAL-32B)  |\n\nWe also release the models of SFT version. You can construct your own RL pipeline on them:)\n\n| Model    | Huggingface Repo |\n|----------|------------------|\n| OREAL-7B-SFT  | [Model Link](https://huggingface.co/internlm/OREAL-7B-SFT)  |\n| OREAL-32B-SFT  | [Model Link](https://huggingface.co/internlm/OREAL-32B-SFT)  |\n\n### Data\n\nWe release the prompts utilzed in our RL training phase.\n\n| Dataset    | Huggingface Repo |\n|----------|------------------|\n| RL Prompts  | [Model Link](https://huggingface.co/datasets/internlm/OREAL-RL-Prompts)  |\n\n## 🚄 Training Tutorial\n\n### 1. Install Dependencies\n\nOREAL utilizes [XTuner](https://github.com/InternLM/xtuner/tree/main) as the training engine. 
\n\n```bash\npip install torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cu124\npip install flash-attn --no-build-isolation\npip install -r requirements.txt\n```\n\n### 2. Prepare Data (Optional)\n\nThe training data can be found at [HERE](https://huggingface.co/datasets/internlm/OREAL-RL-Prompts). The training script will automatically download the data from huggingface.\n\n### 3. Start LLM Verifier Service\n\nOREAL requires a language model as a verifier to evaluate the correctness of the generated solutions along with a rule based verificy function (see the [source code](oreal/judgers/math_judger.py)). We use Qwen2.5-72B-Instruct as the verifier in our experiments. You can start the verifier service with [lmdeploy](https://github.com/InternLM/lmdeploy) by running the following command:\n\n```bash\nlmdeploy serve api_server Qwen/Qwen2.5-72B-Instruct --tp 4 --chat-template qwen --log-level INFO --server-port 10003\n```\n\nOr you can use any other inference engine such as [sglang](https://github.com/sgl-project/sglang) or [vllm](https://github.com/vllm-project/vllm) or [ollama](https://ollama.com/). Just make sure the verifier service can be reached by OpenAI-compatible API.\n\nFill in the verifier service address in the [config file](./oreal/configs) before training.\n\n```python\njudgers_config = dict(\n    math_judger=dict(  # math judger related settings\n        hosts=[\"x.x.x.x:xxxx\", \"x.x.x.x:xxxx\"],  # verifier service addresses\n        stop_word=stop_word,\n        thinking_finish_words=[\"<conclude>\", \"**Final Answer**\", \"</think>\"],\n        num_processes=8,\n        concurrency_per_proc=(8, 8),\n    )\n)\n```\n\n### 4. Train OREAL\n\n**OREAL-7B**\n\n7B requires 32 GPUs to train. You can use the following command to train the model with [OREAL-7B-SFT](https://huggingface.co/internlm/OREAL-7B-SFT) as the initial policy:\n\n```bash\ntorchrun --nnodes 4 --nproc_per_node 8 --master_addr $MASTER_ADDR --node_rank $RANK --master_port $MASTER_PORT train_oreal.py oreal/configs/oreal_w_tokenrm_OREAL-7B-SFT_seqlen16k.py --total_steps 90 --work_dir ./work_dir/oreal_w_tokenrm_OREAL-7B-SFT_seqlen16k\n```\n\nIt takes about 9 hours to train the model 90 steps with 32xA100.\n\n**OREAL-32B**\n\n32B requires 128 GPUs to train. You can use the following command to train the model with [OREAL-32B-SFT](https://huggingface.co/internlm/OREAL-32B-SFT) as the initial policy:\n\n```bash\ntorchrun --nnodes 16 --nproc_per_node 8 --master_addr $MASTER_ADDR --node_rank $RANK --master_port $MASTER_PORT train_oreal.py oreal/configs/oreal_w_tokenrm_OREAL-32B-SFT_seqlen16k.py --total_steps 90 --work_dir ./work_dir/oreal_w_tokenrm_OREAL-32B-SFT_seqlen16k\n```\n\nMore detailed training settings can be found in the [oreal/configs](./oreal/configs) folder.\n\n**Note**:\n\n+ The best checkpoint may not be the last one. Consider evaluating during training and early stopping when the performance is saturated.\n\n\n## 🖊️ Citation\n\n```\n@article{lyu2025exploring,\n  title={Exploring the Limit of Outcome Reward for Learning Mathematical Reasoning},\n  author={Lyu, Chengqi and Gao, Songyang and Gu, Yuzhe and Zhang, Wenwei and Gao, Jianfei and Liu, Kuikun and Wang, Ziyi and Li, Shuaibin and Zhao, Qian and Huang, Haian and others},\n  journal={arXiv preprint arXiv:2502.06781},\n  year={2025}\n}\n```\n\n## 💳 License\n\nThis project is released under the Apache 2.0 [license](./LICENSE).\n"
  },
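  {
    "path": "examples/check_verifier.py",
    "content": "\"\"\"Illustrative sketch, not part of the original OREAL release: smoke-test that a\nverifier service (e.g. one started with ``lmdeploy serve api_server ...``) is\nreachable through its OpenAI-compatible API before launching training.\n\nAssumptions: HOST matches one entry of ``hosts`` in ``judgers_config``, MODEL\nmatches the model served by the inference engine, and the ``requests`` package\nis installed.\n\"\"\"\nimport requests\n\nHOST = \"127.0.0.1:10003\"  # hypothetical address; use a host from your config\nMODEL = \"Qwen/Qwen2.5-72B-Instruct\"  # the verifier model being served\n\npayload = {\n    \"model\": MODEL,\n    \"messages\": [{\"role\": \"user\", \"content\": \"Reply with the single word: ok\"}],\n    \"max_tokens\": 8,\n}\n\n# POST to the standard OpenAI-compatible chat completions route exposed by\n# lmdeploy / vllm / sglang API servers.\nresp = requests.post(f\"http://{HOST}/v1/chat/completions\", json=payload, timeout=60)\nresp.raise_for_status()\nprint(resp.json()[\"choices\"][0][\"message\"][\"content\"])\n"
  },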
  {
    "path": "oreal/configs/oreal_w_tokenrm_DSR1-Distll-Qwen-7B_seqlen16k.py",
    "content": "# Model Related Settings\nactor = \"deepseek-ai/DeepSeek-R1-Distill-Qwen-7B\"\nreference = actor\ntoken_level_rm = actor\n\n# Tokenizer related settings\n# jinja2 template for hf tokenizer\nchat_template = \"{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<｜User｜>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<｜Assistant｜><｜tool▁calls▁begin｜><｜tool▁call▁begin｜>' + tool['type'] + '<｜tool▁sep｜>' + tool['function']['name'] + '\\\\n' + '```json' + '\\\\n' + tool['function']['arguments'] + '\\\\n' + '```' + '<｜tool▁call▁end｜>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\\\n' + '<｜tool▁call▁begin｜>' + tool['type'] + '<｜tool▁sep｜>' + tool['function']['name'] + '\\\\n' + '```json' + '\\\\n' + tool['function']['arguments'] + '\\\\n' + '```' + '<｜tool▁call▁end｜>'}}{{'<｜tool▁calls▁end｜><｜end▁of▁sentence｜>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<｜tool▁outputs▁end｜>' + message['content'] + '<｜end▁of▁sentence｜>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<｜Assistant｜>' + content + '<｜end▁of▁sentence｜>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<｜tool▁outputs▁begin｜><｜tool▁output▁begin｜>' + message['content'] + '<｜tool▁output▁end｜>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\\\n<｜tool▁output▁begin｜>' + message['content'] + '<｜tool▁output▁end｜>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<｜tool▁outputs▁end｜>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<｜Assistant｜>'}}{% endif %}\"\nstop_word = \"<｜end▁of▁sentence｜>\"\n\ndtype = \"auto\"\nselective_recompute = 1.0\ncpu_offload = False\ncuda_graph = True\ntp_size = 4\nsp_size = 1\n\n# Dataset Related Settings\ndata_difficulty_balance_cfg = [\n    # pass rate range, repeat times\n    ((0.0, 0.2), 6),\n    ((0.2, 0.4), 4),\n    ((0.4, 0.6), 4),\n    ((0.6, 0.8), 2),\n]\ndatasets = \"internlm/OREAL-RL-Prompts\"\nnum_workers = 0\n\n# Generate Related Settings\ngen_global_batch = 1024\ngen_max_new = 14000\ngen_max_length = 16384\ngen_top_k = 0  # set to 0 means not use topk sampling\ngen_top_p = 0.9\ntemperature = 1.0\ngen_do_sample = True\nmax_prefill_batch = 16\nprompt_repeat_k = 16  # sample k times for each prompt\n\n# Optimizer Related Settings\nrl_global_batch = gen_global_batch\nrl_mirco_batch = 2\nfilter_trajectory = True  # sample one correct and one incorrect trajectory for each prompt\nwarmup_steps = 10\ntotal_steps = 90\nactor_freeze_steps = 10  # freeze actor and only update token level reward model for the first 10 steps\nactor_lr = 5e-7\nactor_min_lr = 1e-7\ntoken_level_rm_lr = 2e-6\ntoken_level_rm_lr_min = 4e-7\nwd = 0.01  # weight decay\nmax_grad_norm = 1  # gradient clipping\n\n# importance sampling setting with token 
level reward model\nthreshold_rescale = True\ncorrect_threshold = 0.5\nincorrect_threshold = 0.5\n# topk_rescale = True\n# correct_topk_ratio = 0.25\n# incorrect_topk_ratio = 0.25\n\nreward_shaping_type = \"rloo\"\nloss_type = \"per_token\"\npositive_loss_factor = 1.0\nnegative_loss_factor = 0.5\npos_mult_adv = True\nkl_coef = 0.01  # KL coefficient\n\n# General Settings\nwork_dir = \"work_dirs\"  # directory to save logs and checkpoints\ncheckpoint_interval = 10  # interval to save checkpoint, <1 means save by proportion, >=1 means save by steps\nlog_interval = 1  # interval steps for logging\nseed = 0  # random seed\ndebug = False  # set log level to DEBUG\n\n# judger related settings\njudgers_config = dict(\n    math_judger=dict(  # math judger related settings\n        hosts=[\n            \"YOUR_JUDGER_HOST1:PORT\",\n            \"YOUR_JUDGER_HOST2:PORT\",\n        ],\n        stop_word=stop_word,\n        thinking_finish_words=[\"<conclude>\", \"**Final Answer**\", \"</think>\"],\n        num_processes=8,\n        concurrency_per_proc=(8, 8),\n    )\n)\ndata_judger_mapping = dict(math=[\"math_judger\"])\n"
  },
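  {
    "path": "examples/render_chat_template.py",
    "content": "\"\"\"Illustrative sketch, not part of the original OREAL release: preview how a\nconfig's ``chat_template`` renders a conversation, mirroring what\n``OrealPromptDataset.__getitem__`` does via ``tokenizer.apply_chat_template``.\n\nAssumptions: ``transformers`` is installed and the Hugging Face Hub is\nreachable. Paste the ``chat_template`` string from a config file into\nCHAT_TEMPLATE to preview it; with None, the tokenizer's built-in template is\nused instead.\n\"\"\"\nfrom transformers import AutoTokenizer\n\n# e.g. the ``chat_template`` value from\n# oreal/configs/oreal_w_tokenrm_DSR1-Distll-Qwen-7B_seqlen16k.py\nCHAT_TEMPLATE = None\n\ntokenizer = AutoTokenizer.from_pretrained(\"deepseek-ai/DeepSeek-R1-Distill-Qwen-7B\")\nmessages = [{\"role\": \"user\", \"content\": \"What is 1 + 1?\"}]\n\n# ``tokenize=False`` returns the rendered prompt string instead of token ids;\n# ``add_generation_prompt=True`` appends the assistant header, as the prompt\n# dataset does before generation.\ntext = tokenizer.apply_chat_template(\n    messages,\n    chat_template=CHAT_TEMPLATE,\n    tokenize=False,\n    add_generation_prompt=True,\n)\nprint(text)\n"
  },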
  {
    "path": "oreal/configs/oreal_w_tokenrm_OREAL-32B-SFT_seqlen16k.py",
    "content": "# Model Related Settings\nactor = 'internlm/OREAL-32B-SFT'\nreference = actor\ntoken_level_rm = actor\n\n# Tokenizer related settings\n# jinja2 template for hf tokenizer\nchat_template = \"{% set sys_prompt = \\\"You are an expert mathematician with extensive experience in mathematical competitions. You approach problems through systematic thinking and rigorous reasoning. When solving problems, follow these thought processes:\\\\n\\\\n## Deep Understanding\\\\nTake time to fully comprehend the problem before attempting a solution. Consider:\\\\n- What is the real question being asked?\\\\n- What are the given conditions and what do they tell us?\\\\n- Are there any special restrictions or assumptions?\\\\n- Which information is crucial and which is supplementary?\\\\n\\\\n## Multi-angle Analysis\\\\nBefore solving, conduct thorough analysis:\\\\n- What mathematical concepts and properties are involved?\\\\n- Can you recall similar classic problems or solution methods?\\\\n- Would diagrams or tables help visualize the problem?\\\\n- Are there special cases that need separate consideration?\\\\n\\\\n## Systematic Thinking\\\\nPlan your solution path:\\\\n- Propose multiple possible approaches\\\\n- Analyze the feasibility and merits of each method\\\\n- Choose the most appropriate method and explain why\\\\n- Break complex problems into smaller, manageable steps\\\\n\\\\n## Rigorous Proof\\\\nDuring the solution process:\\\\n- Provide solid justification for each step\\\\n- Include detailed proofs for key conclusions\\\\n- Pay attention to logical connections\\\\n- Be vigilant about potential oversights\\\\n\\\\n## Repeated Verification\\\\nAfter completing your solution:\\\\n- Verify your results satisfy all conditions\\\\n- Check for overlooked special cases\\\\n- Consider if the solution can be optimized or simplified\\\\n- Review your reasoning process\\\\n\\\\nRemember:\\\\n1. Take time to think thoroughly rather than rushing to an answer\\\\n2. Rigorously prove each key conclusion\\\\n3. Keep an open mind and try different approaches\\\\n4. Summarize valuable problem-solving methods\\\\n5. Maintain healthy skepticism and verify multiple times\\\\n\\\\nYour response should reflect deep mathematical understanding and precise logical thinking, making your solution path and reasoning clear to others.\\\\n\\\\nWhen you're ready, present your complete solution with:\\\\n- Clear problem understanding\\\\n- Detailed solution process\\\\n- Key insights\\\\n- Thorough verification\\\\n\\\\nFocus on clear, logical progression of ideas and thorough explanation of your mathematical reasoning. 
Provide answers in the same language as the user asking the question, repeat the final answer using a '\\\\\\\\boxed{}' without any units, you have [[8192]] tokens to complete the answer.\\\" %}{%- if tools %}\\n    {{- '<|im_start|>system\\\\n' }}\\n    {%- if messages[0]['role'] == 'system' %}\\n        {{- messages[0]['content'] }}\\n    {%- else %}\\n        {{- sys_prompt }}\\n    {%- endif %}\\n    {{- \\\"\\\\n\\\\n# Tools\\\\n\\\\nYou may call one or more functions to assist with the user query.\\\\n\\\\nYou are provided with function signatures within <tools></tools> XML tags:\\\\n<tools>\\\" }}\\n    {%- for tool in tools %}\\n        {{- \\\"\\\\n\\\" }}\\n        {{- tool | tojson }}\\n    {%- endfor %}\\n    {{- \\\"\\\\n</tools>\\\\n\\\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\\\n<tool_call>\\\\n{\\\\\\\"name\\\\\\\": <function-name>, \\\\\\\"arguments\\\\\\\": <args-json-object>}\\\\n</tool_call><|im_end|>\\\\n\\\" }}\\n{%- else %}\\n    {%- if messages[0]['role'] == 'system' %}\\n        {{- '<|im_start|>system\\\\n' + messages[0]['content'] + '<|im_end|>\\\\n' }}\\n    {%- else %}\\n        {{- '<|im_start|>system\\\\n' ~ sys_prompt ~ '<|im_end|>\\\\n' }}\\n    {%- endif %}\\n{%- endif %}\\n{%- for message in messages %}\\n    {%- if (message.role == \\\"user\\\") or (message.role == \\\"system\\\" and not loop.first) or (message.role == \\\"assistant\\\" and not message.tool_calls) %}\\n        {{- '<|im_start|>' + message.role + '\\\\n' + message.content + '<|im_end|>' + '\\\\n' }}\\n    {%- elif message.role == \\\"assistant\\\" %}\\n        {{- '<|im_start|>' + message.role }}\\n        {%- if message.content %}\\n            {{- '\\\\n' + message.content }}\\n        {%- endif %}\\n        {%- for tool_call in message.tool_calls %}\\n            {%- if tool_call.function is defined %}\\n                {%- set tool_call = tool_call.function %}\\n            {%- endif %}\\n            {{- '\\\\n<tool_call>\\\\n{\\\"name\\\": \\\"' }}\\n            {{- tool_call.name }}\\n            {{- '\\\", \\\"arguments\\\": ' }}\\n            {{- tool_call.arguments | tojson }}\\n            {{- '}\\\\n</tool_call>' }}\\n        {%- endfor %}\\n        {{- '<|im_end|>\\\\n' }}\\n    {%- elif message.role == \\\"tool\\\" %}\\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \\\"tool\\\") %}\\n            {{- '<|im_start|>user' }}\\n        {%- endif %}\\n        {{- '\\\\n<tool_response>\\\\n' }}\\n        {{- message.content }}\\n        {{- '\\\\n</tool_response>' }}\\n        {%- if loop.last or (messages[loop.index0 + 1].role != \\\"tool\\\") %}\\n            {{- '<|im_end|>\\\\n' }}\\n        {%- endif %}\\n    {%- endif %}\\n{%- endfor %}\\n{%- if add_generation_prompt %}\\n    {{- '<|im_start|>assistant\\\\n' }}\\n{%- endif %}\\n\"\nstop_word = \"<|im_end|>\"\n\ndtype = \"auto\"\nselective_recompute = 1.0\ncpu_offload = False\ncuda_graph = True\ntp_size = 8\nsp_size = 1\n\n# Dataset Related Settings\ndata_difficulty_balance_cfg = [\n    # pass rate range, repeat times\n    ((0.0, 0.2), 6),\n    ((0.2, 0.4), 4),\n    ((0.4, 0.6), 4),\n    ((0.6, 0.8), 2),\n]\ndatasets = \"internlm/OREAL-RL-Prompts\"\nnum_workers = 0\n\n# Generate Related Settings\ngen_global_batch = 1024\ngen_max_new = 14000\ngen_max_length = 16384\ngen_top_k = 0  # 0 disables top-k sampling\ngen_top_p = 0.9\ntemperature = 1.0\ngen_do_sample = True\nmax_prefill_batch = 16\nprompt_repeat_k = 16  # sample k times for each prompt\n\n# Optimizer Related Settings\nrl_global_batch = gen_global_batch\nrl_mirco_batch = 2\nfilter_trajectory = True  # sample one correct and one incorrect trajectory for each prompt\nwarmup_steps = 10\ntotal_steps = 90\nactor_freeze_steps = 10  # freeze the actor and only update the token-level reward model for the first 10 steps\nactor_lr = 5e-7\nactor_min_lr = 1e-7\ntoken_level_rm_lr = 2e-6\ntoken_level_rm_lr_min = 4e-7\nwd = 0.01  # weight decay\nmax_grad_norm = 1  # gradient clipping\n\n# importance sampling settings with the token level reward model\nthreshold_rescale = True\ncorrect_threshold = 0.5\nincorrect_threshold = 0.5\n# topk_rescale = True\n# correct_topk_ratio = 0.25\n# incorrect_topk_ratio = 0.25\n\nreward_shaping_type = \"rloo\"\nloss_type = \"per_token\"\npositive_loss_factor = 1.0\nnegative_loss_factor = 0.5\npos_mult_adv = True\nkl_coef = 0.01  # KL coefficient\n\n# General Settings\nwork_dir = \"work_dirs\"  # directory to save logs and checkpoints\ncheckpoint_interval = 10  # checkpoint interval: < 1 means a proportion of total steps, >= 1 means a step count\nlog_interval = 1  # logging interval (steps)\nseed = 0  # random seed\ndebug = False  # set log level to DEBUG\n\n# judger-related settings\njudgers_config = dict(\n    math_judger=dict(  # math judger settings\n        hosts=[\n            \"YOUR_JUDGER_HOST1:PORT\",\n            \"YOUR_JUDGER_HOST2:PORT\",\n        ],\n        stop_word=stop_word,\n        thinking_finish_words=[\"<conclude>\", \"**Final Answer**\", \"</think>\"],\n        num_processes=8,\n        concurrency_per_proc=(8, 8),\n    )\n)\ndata_judger_mapping = dict(math=[\"math_judger\"])\n"
  },
  {
    "path": "oreal/configs/oreal_w_tokenrm_OREAL-7B-SFT_seqlen16k.py",
    "content": "# Model Related Settings\nactor = \"internlm/OREAL-7B-SFT\"\nreference = actor\ntoken_level_rm = actor\n\n# Tokenizer related settings\n# jinja2 template for hf tokenizer\nchat_template = \"{% set sys_prompt = \\\"You are an expert mathematician with extensive experience in mathematical competitions. You approach problems through systematic thinking and rigorous reasoning. When solving problems, follow these thought processes:\\\\n\\\\n## Deep Understanding\\\\nTake time to fully comprehend the problem before attempting a solution. Consider:\\\\n- What is the real question being asked?\\\\n- What are the given conditions and what do they tell us?\\\\n- Are there any special restrictions or assumptions?\\\\n- Which information is crucial and which is supplementary?\\\\n\\\\n## Multi-angle Analysis\\\\nBefore solving, conduct thorough analysis:\\\\n- What mathematical concepts and properties are involved?\\\\n- Can you recall similar classic problems or solution methods?\\\\n- Would diagrams or tables help visualize the problem?\\\\n- Are there special cases that need separate consideration?\\\\n\\\\n## Systematic Thinking\\\\nPlan your solution path:\\\\n- Propose multiple possible approaches\\\\n- Analyze the feasibility and merits of each method\\\\n- Choose the most appropriate method and explain why\\\\n- Break complex problems into smaller, manageable steps\\\\n\\\\n## Rigorous Proof\\\\nDuring the solution process:\\\\n- Provide solid justification for each step\\\\n- Include detailed proofs for key conclusions\\\\n- Pay attention to logical connections\\\\n- Be vigilant about potential oversights\\\\n\\\\n## Repeated Verification\\\\nAfter completing your solution:\\\\n- Verify your results satisfy all conditions\\\\n- Check for overlooked special cases\\\\n- Consider if the solution can be optimized or simplified\\\\n- Review your reasoning process\\\\n\\\\nRemember:\\\\n1. Take time to think thoroughly rather than rushing to an answer\\\\n2. Rigorously prove each key conclusion\\\\n3. Keep an open mind and try different approaches\\\\n4. Summarize valuable problem-solving methods\\\\n5. Maintain healthy skepticism and verify multiple times\\\\n\\\\nYour response should reflect deep mathematical understanding and precise logical thinking, making your solution path and reasoning clear to others.\\\\n\\\\nWhen you're ready, present your complete solution with:\\\\n- Clear problem understanding\\\\n- Detailed solution process\\\\n- Key insights\\\\n- Thorough verification\\\\n\\\\nFocus on clear, logical progression of ideas and thorough explanation of your mathematical reasoning. 
Provide answers in the same language as the user asking the question, repeat the final answer using a '\\\\\\\\boxed{}' without any units, you have [[8192]] tokens to complete the answer.\\\" %}{%- if tools %}\\n    {{- '<|im_start|>system\\\\n' }}\\n    {%- if messages[0]['role'] == 'system' %}\\n        {{- messages[0]['content'] }}\\n    {%- else %}\\n        {{- sys_prompt }}\\n    {%- endif %}\\n    {{- \\\"\\\\n\\\\n# Tools\\\\n\\\\nYou may call one or more functions to assist with the user query.\\\\n\\\\nYou are provided with function signatures within <tools></tools> XML tags:\\\\n<tools>\\\" }}\\n    {%- for tool in tools %}\\n        {{- \\\"\\\\n\\\" }}\\n        {{- tool | tojson }}\\n    {%- endfor %}\\n    {{- \\\"\\\\n</tools>\\\\n\\\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\\\n<tool_call>\\\\n{\\\\\\\"name\\\\\\\": <function-name>, \\\\\\\"arguments\\\\\\\": <args-json-object>}\\\\n</tool_call><|im_end|>\\\\n\\\" }}\\n{%- else %}\\n    {%- if messages[0]['role'] == 'system' %}\\n        {{- '<|im_start|>system\\\\n' + messages[0]['content'] + '<|im_end|>\\\\n' }}\\n    {%- else %}\\n        {{- '<|im_start|>system\\\\n' ~ sys_prompt ~ '<|im_end|>\\\\n' }}\\n    {%- endif %}\\n{%- endif %}\\n{%- for message in messages %}\\n    {%- if (message.role == \\\"user\\\") or (message.role == \\\"system\\\" and not loop.first) or (message.role == \\\"assistant\\\" and not message.tool_calls) %}\\n        {{- '<|im_start|>' + message.role + '\\\\n' + message.content + '<|im_end|>' + '\\\\n' }}\\n    {%- elif message.role == \\\"assistant\\\" %}\\n        {{- '<|im_start|>' + message.role }}\\n        {%- if message.content %}\\n            {{- '\\\\n' + message.content }}\\n        {%- endif %}\\n        {%- for tool_call in message.tool_calls %}\\n            {%- if tool_call.function is defined %}\\n                {%- set tool_call = tool_call.function %}\\n            {%- endif %}\\n            {{- '\\\\n<tool_call>\\\\n{\\\"name\\\": \\\"' }}\\n            {{- tool_call.name }}\\n            {{- '\\\", \\\"arguments\\\": ' }}\\n            {{- tool_call.arguments | tojson }}\\n            {{- '}\\\\n</tool_call>' }}\\n        {%- endfor %}\\n        {{- '<|im_end|>\\\\n' }}\\n    {%- elif message.role == \\\"tool\\\" %}\\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \\\"tool\\\") %}\\n            {{- '<|im_start|>user' }}\\n        {%- endif %}\\n        {{- '\\\\n<tool_response>\\\\n' }}\\n        {{- message.content }}\\n        {{- '\\\\n</tool_response>' }}\\n        {%- if loop.last or (messages[loop.index0 + 1].role != \\\"tool\\\") %}\\n            {{- '<|im_end|>\\\\n' }}\\n        {%- endif %}\\n    {%- endif %}\\n{%- endfor %}\\n{%- if add_generation_prompt %}\\n    {{- '<|im_start|>assistant\\\\n' }}\\n{%- endif %}\\n\"\nstop_word = \"<|im_end|>\"\n\ndtype = \"auto\"\nselective_recompute = 1.0\ncpu_offload = False\ncuda_graph = True\ntp_size = 4\nsp_size = 1\n\n# Dataset Related Settings\ndata_difficulty_balance_cfg = [\n    # pass rate range, repeat times\n    ((0.0, 0.2), 6),\n    ((0.2, 0.4), 4),\n    ((0.4, 0.6), 4),\n    ((0.6, 0.8), 2),\n]\ndatasets = \"internlm/OREAL-RL-Prompts\"\nnum_workers = 0\n\n# Generate Related Settings\ngen_global_batch = 1024\ngen_max_new = 14000\ngen_max_length = 16384\ngen_top_k = 0  # 0 disables top-k sampling\ngen_top_p = 0.9\ntemperature = 1.0\ngen_do_sample = True\nmax_prefill_batch = 16\nprompt_repeat_k = 16  # sample k times for each prompt\n\n# Optimizer Related Settings\nrl_global_batch = gen_global_batch\nrl_mirco_batch = 2\nfilter_trajectory = True  # sample one correct and one incorrect trajectory for each prompt\nwarmup_steps = 10\ntotal_steps = 90\nactor_freeze_steps = 10  # freeze the actor and only update the token-level reward model for the first 10 steps\nactor_lr = 5e-7\nactor_min_lr = 1e-7\ntoken_level_rm_lr = 2e-6\ntoken_level_rm_lr_min = 4e-7\nwd = 0.01  # weight decay\nmax_grad_norm = 1  # gradient clipping\n\n# importance sampling settings with the token level reward model\nthreshold_rescale = True\ncorrect_threshold = 0.5\nincorrect_threshold = 0.5\n# topk_rescale = True\n# correct_topk_ratio = 0.25\n# incorrect_topk_ratio = 0.25\n\nreward_shaping_type = \"rloo\"\nloss_type = \"per_token\"\npositive_loss_factor = 1.0\nnegative_loss_factor = 0.5\npos_mult_adv = True\nkl_coef = 0.01  # KL coefficient\n\n# General Settings\nwork_dir = \"work_dirs\"  # directory to save logs and checkpoints\ncheckpoint_interval = 10  # checkpoint interval: < 1 means a proportion of total steps, >= 1 means a step count\nlog_interval = 1  # logging interval (steps)\nseed = 0  # random seed\ndebug = False  # set log level to DEBUG\n\n# judger-related settings\njudgers_config = dict(\n    math_judger=dict(  # math judger settings\n        hosts=[\n            \"YOUR_JUDGER_HOST1:PORT\",\n            \"YOUR_JUDGER_HOST2:PORT\",\n        ],\n        stop_word=stop_word,\n        thinking_finish_words=[\"<conclude>\", \"**Final Answer**\", \"</think>\"],\n        num_processes=8,\n        concurrency_per_proc=(8, 8),\n    )\n)\ndata_judger_mapping = dict(math=[\"math_judger\"])\n"
  },
  {
    "path": "oreal/configs/oreal_wo_tokenrm_OREAL-7B-SFT_seqlen16k.py",
    "content": "# Model Related Settings\nactor = \"internlm/OREAL-7B-SFT\"\nreference = actor\ntoken_level_rm = None\n\n# Tokenizer related settings\n# jinja2 template for hf tokenizer\nchat_template = \"{% set sys_prompt = \\\"You are an expert mathematician with extensive experience in mathematical competitions. You approach problems through systematic thinking and rigorous reasoning. When solving problems, follow these thought processes:\\\\n\\\\n## Deep Understanding\\\\nTake time to fully comprehend the problem before attempting a solution. Consider:\\\\n- What is the real question being asked?\\\\n- What are the given conditions and what do they tell us?\\\\n- Are there any special restrictions or assumptions?\\\\n- Which information is crucial and which is supplementary?\\\\n\\\\n## Multi-angle Analysis\\\\nBefore solving, conduct thorough analysis:\\\\n- What mathematical concepts and properties are involved?\\\\n- Can you recall similar classic problems or solution methods?\\\\n- Would diagrams or tables help visualize the problem?\\\\n- Are there special cases that need separate consideration?\\\\n\\\\n## Systematic Thinking\\\\nPlan your solution path:\\\\n- Propose multiple possible approaches\\\\n- Analyze the feasibility and merits of each method\\\\n- Choose the most appropriate method and explain why\\\\n- Break complex problems into smaller, manageable steps\\\\n\\\\n## Rigorous Proof\\\\nDuring the solution process:\\\\n- Provide solid justification for each step\\\\n- Include detailed proofs for key conclusions\\\\n- Pay attention to logical connections\\\\n- Be vigilant about potential oversights\\\\n\\\\n## Repeated Verification\\\\nAfter completing your solution:\\\\n- Verify your results satisfy all conditions\\\\n- Check for overlooked special cases\\\\n- Consider if the solution can be optimized or simplified\\\\n- Review your reasoning process\\\\n\\\\nRemember:\\\\n1. Take time to think thoroughly rather than rushing to an answer\\\\n2. Rigorously prove each key conclusion\\\\n3. Keep an open mind and try different approaches\\\\n4. Summarize valuable problem-solving methods\\\\n5. Maintain healthy skepticism and verify multiple times\\\\n\\\\nYour response should reflect deep mathematical understanding and precise logical thinking, making your solution path and reasoning clear to others.\\\\n\\\\nWhen you're ready, present your complete solution with:\\\\n- Clear problem understanding\\\\n- Detailed solution process\\\\n- Key insights\\\\n- Thorough verification\\\\n\\\\nFocus on clear, logical progression of ideas and thorough explanation of your mathematical reasoning. 
Provide answers in the same language as the user asking the question, repeat the final answer using a '\\\\\\\\boxed{}' without any units, you have [[8192]] tokens to complete the answer.\\\" %}{%- if tools %}\\n    {{- '<|im_start|>system\\\\n' }}\\n    {%- if messages[0]['role'] == 'system' %}\\n        {{- messages[0]['content'] }}\\n    {%- else %}\\n        {{- sys_prompt }}\\n    {%- endif %}\\n    {{- \\\"\\\\n\\\\n# Tools\\\\n\\\\nYou may call one or more functions to assist with the user query.\\\\n\\\\nYou are provided with function signatures within <tools></tools> XML tags:\\\\n<tools>\\\" }}\\n    {%- for tool in tools %}\\n        {{- \\\"\\\\n\\\" }}\\n        {{- tool | tojson }}\\n    {%- endfor %}\\n    {{- \\\"\\\\n</tools>\\\\n\\\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\\\n<tool_call>\\\\n{\\\\\\\"name\\\\\\\": <function-name>, \\\\\\\"arguments\\\\\\\": <args-json-object>}\\\\n</tool_call><|im_end|>\\\\n\\\" }}\\n{%- else %}\\n    {%- if messages[0]['role'] == 'system' %}\\n        {{- '<|im_start|>system\\\\n' + messages[0]['content'] + '<|im_end|>\\\\n' }}\\n    {%- else %}\\n        {{- '<|im_start|>system\\\\n' ~ sys_prompt ~ '<|im_end|>\\\\n' }}\\n    {%- endif %}\\n{%- endif %}\\n{%- for message in messages %}\\n    {%- if (message.role == \\\"user\\\") or (message.role == \\\"system\\\" and not loop.first) or (message.role == \\\"assistant\\\" and not message.tool_calls) %}\\n        {{- '<|im_start|>' + message.role + '\\\\n' + message.content + '<|im_end|>' + '\\\\n' }}\\n    {%- elif message.role == \\\"assistant\\\" %}\\n        {{- '<|im_start|>' + message.role }}\\n        {%- if message.content %}\\n            {{- '\\\\n' + message.content }}\\n        {%- endif %}\\n        {%- for tool_call in message.tool_calls %}\\n            {%- if tool_call.function is defined %}\\n                {%- set tool_call = tool_call.function %}\\n            {%- endif %}\\n            {{- '\\\\n<tool_call>\\\\n{\\\"name\\\": \\\"' }}\\n            {{- tool_call.name }}\\n            {{- '\\\", \\\"arguments\\\": ' }}\\n            {{- tool_call.arguments | tojson }}\\n            {{- '}\\\\n</tool_call>' }}\\n        {%- endfor %}\\n        {{- '<|im_end|>\\\\n' }}\\n    {%- elif message.role == \\\"tool\\\" %}\\n        {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \\\"tool\\\") %}\\n            {{- '<|im_start|>user' }}\\n        {%- endif %}\\n        {{- '\\\\n<tool_response>\\\\n' }}\\n        {{- message.content }}\\n        {{- '\\\\n</tool_response>' }}\\n        {%- if loop.last or (messages[loop.index0 + 1].role != \\\"tool\\\") %}\\n            {{- '<|im_end|>\\\\n' }}\\n        {%- endif %}\\n    {%- endif %}\\n{%- endfor %}\\n{%- if add_generation_prompt %}\\n    {{- '<|im_start|>assistant\\\\n' }}\\n{%- endif %}\\n\"\nstop_word = \"<|im_end|>\"\n\ndtype = \"auto\"\nselective_recompute = 1.0\ncpu_offload = False\ncuda_graph = True\ntp_size = 4\nsp_size = 1\n\n# Dataset Related Settings\ndata_difficulty_balance_cfg = [\n    # pass rate range, repeat times\n    ((0.0, 0.2), 6),\n    ((0.2, 0.4), 4),\n    ((0.4, 0.6), 4),\n    ((0.6, 0.8), 2),\n]\ndatasets = \"internlm/OREAL-RL-Prompts\"\nnum_workers = 0\n\n# Generate Related Settings\ngen_global_batch = 1024\ngen_max_new = 14000\ngen_max_length = 16384\ngen_top_k = 0  # 0 disables top-k sampling\ngen_top_p = 0.9\ntemperature = 1.0\ngen_do_sample = True\nmax_prefill_batch = 16\nprompt_repeat_k = 16  # sample k times for each prompt\n\n# Optimizer Related Settings\nrl_global_batch = gen_global_batch\nrl_mirco_batch = 2\nfilter_trajectory = False\nwarmup_steps = 10\ntotal_steps = 90\nactor_freeze_steps = 0\nactor_lr = 5e-7\nactor_min_lr = 1e-7\ntoken_level_rm_lr = 2e-6\ntoken_level_rm_lr_min = 4e-7\nwd = 0.01  # weight decay\nmax_grad_norm = 1  # gradient clipping\n\n# importance sampling settings with the token level reward model\nthreshold_rescale = True\ncorrect_threshold = 0.5\nincorrect_threshold = 0.5\n# topk_rescale = True\n# correct_topk_ratio = 0.25\n# incorrect_topk_ratio = 0.25\n\nreward_shaping_type = \"rloo\"\nloss_type = \"per_token\"\npositive_loss_factor = 1.0\nnegative_loss_factor = 0.5\npos_mult_adv = True\nkl_coef = 0.01  # KL coefficient\n\n# General Settings\nwork_dir = \"work_dirs\"  # directory to save logs and checkpoints\ncheckpoint_interval = 10  # checkpoint interval: < 1 means a proportion of total steps, >= 1 means a step count\nlog_interval = 1  # logging interval (steps)\nseed = 0  # random seed\ndebug = False  # set log level to DEBUG\n\n# judger-related settings\njudgers_config = dict(\n    math_judger=dict(  # math judger settings\n        hosts=[\n            \"YOUR_JUDGER_HOST1:PORT\",\n            \"YOUR_JUDGER_HOST2:PORT\",\n        ],\n        stop_word=stop_word,\n        thinking_finish_words=[\"<conclude>\", \"**Final Answer**\", \"</think>\"],\n        num_processes=8,\n        concurrency_per_proc=(8, 8),\n    )\n)\ndata_judger_mapping = dict(math=[\"math_judger\"])\n"
  },
  {
    "path": "oreal/datasets/__init__.py",
    "content": "# Copyright (c) InternLM. All rights reserved.\nfrom .prompt import OrealPromptDataset, PromptCollator\nfrom .trajectory import (\n    InferDataset,\n    TrajectoryCollator,\n    TrajectoryDataset,\n    TrajectoryDatasetWithFilter,\n)\n\n__all__ = [\n    \"OrealPromptDataset\",\n    \"PromptCollator\",\n    \"InferDataset\",\n    \"TrajectoryDataset\",\n    \"TrajectoryDatasetWithFilter\",\n    \"TrajectoryCollator\",\n]\n"
  },
  {
    "path": "oreal/datasets/prompt.py",
    "content": "# Copyright (c) InternLM. All rights reserved.\nimport json\n\nimport torch\nfrom datasets import load_dataset\nfrom torch.nn.utils.rnn import pad_sequence\nfrom torch.utils.data import Dataset\nfrom xtuner._lite import get_logger\n\nlogger = get_logger()\n\n\ndef load_hf_datasets(repo, split=\"train\"):\n    dataset = load_dataset(repo, split=split)\n    converted_ds = []\n    for sample in dataset:\n        converted_ds.append(\n            {\n                \"pass_rate\": sample[\"pass_rate\"],\n                \"message_data\": [{\"role\": \"user\", \"content\": sample[\"question\"]}],\n                \"metadata\": {\n                    \"data_source\": \"math\",  # for the router to know which judger to use\n                    \"gold_answer\": sample[\"gold_answer\"],\n                },\n            }\n        )\n    logger.info(f\"Loaded {len(converted_ds)} samples from {repo}\")\n    return converted_ds\n\n\ndef load_jsonl_datasets(file_path):\n    with open(file_path, \"r\") as f:\n        lines = f.readlines()\n    datasets = []\n    for line in lines:\n        sample = json.loads(line)\n        datasets.append(\n            {\n                \"pass_rate\": sample[\"pass_rate\"],\n                \"message_data\": [{\"role\": \"user\", \"content\": sample[\"question\"]}],\n                \"metadata\": {\n                    \"data_source\": \"math\",  # for the router to know which judger to use\n                    \"gold_answer\": sample[\"gold_answer\"],\n                },\n            }\n        )\n    logger.info(f\"Loaded {len(datasets)} samples from {file_path}\")\n    return datasets\n\n\ndef balance_difficulty_with_cfg(dataset, difficulty_balance_cfg):\n    balanced_dataset = []\n    for sample in dataset:\n        pass_rate = sample[\"pass_rate\"]\n        for (low, high), repeat in difficulty_balance_cfg:\n            if low <= pass_rate < high:\n                balanced_dataset.extend([sample] * repeat)\n                break\n    logger.info(\n        f\"After difficulty balancing, the dataset size is {len(balanced_dataset)}\"\n    )\n    return balanced_dataset\n\n\nclass OrealPromptDataset(Dataset):\n    def __init__(self, path, tokenizer, difficulty_balance_cfg=None):\n        if isinstance(path, str):\n            path = [path]\n        dataset = []\n        for p in path:\n            if p.endswith(\".jsonl\"):\n                dataset.extend(load_jsonl_datasets(p))\n            else:\n                dataset.extend(load_hf_datasets(p))\n        if difficulty_balance_cfg:\n            dataset = balance_difficulty_with_cfg(dataset, difficulty_balance_cfg)\n        self.dataset = dataset\n        self.tokenizer = tokenizer\n\n    def __len__(self):\n        return len(self.dataset)\n\n    def __getitem__(self, idx):\n        sample = self.dataset[idx]\n        input_ids = self.tokenizer.apply_chat_template(\n            sample[\"message_data\"], add_generation_prompt=True\n        )\n        sample[\"input_ids\"] = input_ids\n        sample[\"labels\"] = input_ids\n        sample[\"num_tokens\"] = len(input_ids)\n        return sample\n\n\nclass PromptCollator:\n\n    def __init__(self, pad_token_id=0, ignore_id=-100, pack_batch=False):\n        self.pack_batch = pack_batch\n        self.pad_token_id = pad_token_id\n        self.ignore_id = ignore_id\n\n    def __call__(self, instances):\n\n        _instances = []\n        for ins in instances:\n            if isinstance(ins, list):\n                _instances.extend(ins)\n            
else:\n                _instances.append(ins)\n\n        instances = _instances\n\n        input_ids = []\n        labels = []\n        num_tokens = []\n        metadatas = []\n        message_datas = []\n\n        for data in instances:\n\n            input_ids.append(torch.LongTensor(data[\"input_ids\"]))\n            labels.append(torch.LongTensor(data[\"labels\"]))\n            metadatas.append(data[\"metadata\"])\n            message_datas.append(data[\"message_data\"])\n\n            if isinstance(data[\"num_tokens\"], int):\n                num_tokens.append(data[\"num_tokens\"])\n            else:\n                num_tokens.extend(data[\"num_tokens\"])\n\n        attention_mask = [torch.ones_like(ids) for ids in input_ids]\n        num_tokens = torch.IntTensor(num_tokens)\n\n        if len(instances) > 1 and self.pack_batch:\n\n            input_ids = torch.cat(input_ids, dim=0).unsqueeze(0)\n            labels = torch.cat(labels, dim=0).unsqueeze(0)\n            attention_mask = torch.cat(attention_mask, dim=0).unsqueeze(0)\n\n        elif len(instances) > 1 and not self.pack_batch:\n\n            input_ids = pad_sequence(\n                input_ids, batch_first=True, padding_value=self.pad_token_id\n            )\n            labels = pad_sequence(\n                labels, batch_first=True, padding_value=self.ignore_id\n            )\n            attention_mask = pad_sequence(\n                attention_mask, batch_first=True, padding_value=0\n            )\n        else:\n            input_ids = torch.stack(input_ids)\n            labels = torch.stack(labels)\n            attention_mask = torch.stack(attention_mask)\n\n        if input_ids.shape != labels.shape:\n            logger.error(f\"[instances] {instances}\")\n            logger.error(f\"[num_tokens] {num_tokens}\")\n            logger.error(f\"[input_ids] {input_ids}\")\n            logger.error(f\"[labels] {labels}\")\n            raise RuntimeError(\n                \"The shape of input_ids and labels must be \"\n                f\"equal, but  found {input_ids.shape} and \"\n                f\"{labels.shape}.\"\n            )\n        data_dict = {\n            \"input_ids\": input_ids,\n            \"labels\": labels,\n            \"num_tokens\": num_tokens,\n            \"attention_mask\": attention_mask.bool(),\n            \"metadata\": metadatas,\n            \"message_data\": message_datas,\n        }\n\n        return data_dict\n\n\nif __name__ == \"__main__\":\n    difficulty_balance_cfg = [\n        # pass rate range, repeat times\n        ((0.0, 0.2), 6),\n        ((0.2, 0.4), 4),\n        ((0.4, 0.6), 4),\n        ((0.6, 0.8), 2),\n    ]\n    from transformers import AutoTokenizer\n\n    tokenizer = AutoTokenizer.from_pretrained(\"oreal/OREAL-7B\")\n    dataset = OrealPromptDataset(\n        \"internlm/OREAL-RL-Prompts\", tokenizer, difficulty_balance_cfg\n    )\n    print(len(dataset))\n    print(dataset[0])\n    print(tokenizer.decode(dataset[0][\"input_ids\"]))\n"
  },
  {
    "path": "oreal/datasets/trajectory.py",
    "content": "# Copyright (c) InternLM. All rights reserved.\nimport json\nimport random\n\nimport numpy as np\nimport torch\nfrom xtuner._lite import get_logger\nfrom xtuner._lite.algorithms.sft.dataset import SftCollator\n\nlogger = get_logger()\n\n\nclass InferDataset(torch.utils.data.Dataset):\n\n    def __init__(self, prompts_input_ids, responses_ids, message_data, metadata):\n        super().__init__()\n\n        assert (\n            len(prompts_input_ids)\n            == len(responses_ids)\n            == len(message_data)\n            == len(metadata)\n        ), f\"The length of prompts_input_ids, responses_ids, message_data, metadata should be the same, but got {len(prompts_input_ids)}, {len(responses_ids)}, {len(message_data)}, {len(metadata)}\"\n        self.prompts_input_ids = prompts_input_ids\n        self.responses_ids = responses_ids\n        self.message_data = message_data\n        self.metadata = metadata\n\n    def __len__(self):\n        return len(self.prompts_input_ids)\n\n    def __getitem__(self, item):\n\n        prompt_input_ids = self.prompts_input_ids[item]\n        response_ids = self.responses_ids[item]\n        num_prefill_tokens = len(prompt_input_ids)\n\n        input_ids = prompt_input_ids + response_ids\n        labels = [-100] * (num_prefill_tokens - 1) + response_ids + [-100]\n\n        return {\n            \"input_ids\": input_ids,\n            \"labels\": labels,\n            \"num_tokens\": len(input_ids),\n            \"message_data\": self.message_data[item],\n            \"metadata\": self.metadata[item],\n        }\n\n\nclass TrajectoryDataset(torch.utils.data.Dataset):\n\n    def __init__(self):\n        super().__init__()\n        self._num_action_tokens = 0\n        self._num_total_tokens = 0\n        self._trajectories = []\n\n    @property\n    def num_action_tokens(self):\n        return self._num_action_tokens.item()\n\n    @property\n    def num_total_tokens(self):\n        return self._num_total_tokens\n\n    def update(self, trajectories):\n        num_total_tokens = 0\n        num_action_tokens = 0\n        for data in trajectories:\n            labels = np.array(data[\"labels\"])\n            num_total_tokens += labels.size\n            num_action_tokens += (labels >= 0).sum()\n\n        self._num_action_tokens = num_action_tokens\n        self._num_total_tokens = num_total_tokens\n\n        self._trajectories = trajectories\n\n    def dump_jsonl(self, path, tokenizer, debug=False):\n\n        with open(path, \"w\", encoding=\"utf8\") as f:\n            for data in self._trajectories:\n                json_line = {\n                    \"sequence\": (\n                        data[\"sequence_text\"]\n                        if \"sequence_text\" in data\n                        else tokenizer.decode(data[\"input_ids\"])\n                    ),\n                    \"num_tokens\": data[\"num_tokens\"],\n                }\n                json_line[\"judger_reward\"] = data[\"judger_reward\"]\n                json_line[\"judger_advantage\"] = data[\"judger_advantage\"]\n\n                if debug:\n                    json_line[\"input_ids\"] = data[\"input_ids\"]\n                    json_line[\"labels\"] = data[\"labels\"]\n\n                json_str = json.dumps(json_line, ensure_ascii=False)\n                f.write(json_str + \"\\n\")\n\n    def dump_log(self, path, tokenizer, debug=False):\n\n        with open(path, \"w\", encoding=\"utf8\") as f:\n            for data in self._trajectories:\n                log_string = 
f\"[sequence]:\\n{data['sequence_text'] if 'sequence_text' in data else tokenizer.decode(data['input_ids'])}\\n\\n\"\n                log_string += f\"[num_tokens]: {data['num_tokens']}\\n\"\n                log_string += f\"[judger_reward]: {data['judger_reward']}\\n\"\n                log_string += f\"[judger_advantage]: {data['judger_advantage']}\\n\"\n                f.write(log_string + \"\\n\\n=======================\\n\")\n\n    def __len__(self):\n        return len(self._trajectories)\n\n    def __getitem__(self, item):\n\n        return self._trajectories[item]\n\n\nclass TrajectoryDatasetWithFilter(TrajectoryDataset):\n    def __init__(self, repeat_k=1, only_keep_1_pair=True):\n        super().__init__()\n        self.repeat_k = repeat_k\n        self.only_keep_1_pair = only_keep_1_pair\n\n    def update(self, trajectories):\n        # split trajectories into k groups: (a, a, b, b, c, c) -> [(a, a), (b, b), (c, c)]\n        groups = [\n            trajectories[i : i + self.repeat_k]\n            for i in range(0, len(trajectories), self.repeat_k)\n        ]\n        keeped_trajectories = []\n        for group in groups:\n            correctness = [1 if data[\"judger_reward\"] == 1 else 0 for data in group]\n            correct = [data for data in group if data[\"judger_reward\"] == 1]\n            incorrect = [data for data in group if data[\"judger_reward\"] != 1]\n            pass_rate = sum(correctness) / len(correctness)\n            if self.only_keep_1_pair:\n                if pass_rate == 1 or pass_rate == 0:\n                    continue\n                # max keep 1 correct and 1 incorrect\n                correct = random.choice(correct)\n                incorrect = random.choice(incorrect)\n                correct[\"pass_rate\"] = pass_rate\n                incorrect[\"pass_rate\"] = pass_rate\n                keeped_trajectories.append(correct)\n                keeped_trajectories.append(incorrect)\n            else:\n                if pass_rate == 1 or pass_rate == 0:\n                    continue\n                for data in group:\n                    data[\"pass_rate\"] = pass_rate\n                    keeped_trajectories.append(data)\n\n        super().update(keeped_trajectories)\n\n\nclass TrajectoryCollator(SftCollator):\n\n    def __call__(self, instances):\n\n        data = super().__call__(instances)\n        data[\"judger_rewards\"] = [item[\"judger_reward\"] for item in instances]\n        data[\"judger_advantages\"] = [item[\"judger_advantage\"] for item in instances]\n        if \"pass_rate\" in instances[0]:\n            data[\"pass_rate\"] = [item[\"pass_rate\"] for item in instances]\n        return data\n"
  },
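  {
    "path": "examples/trajectory_filter_demo.py",
    "content": "# NOTE: hypothetical usage sketch, not part of the original training code;\n# the file name and toy trajectories are fabricated for illustration. It shows\n# how TrajectoryDatasetWithFilter keeps at most one correct and one incorrect\n# rollout per prompt group.\nfrom oreal.datasets.trajectory import TrajectoryDatasetWithFilter\n\n# Two prompts, each sampled repeat_k=2 times; judger_reward == 1 marks a\n# correct rollout. Groups with pass rate 0 or 1 carry no learning signal\n# for the pairwise objective and are dropped entirely.\ntrajectories = [\n    {\"input_ids\": [1, 2], \"labels\": [-100, 2], \"judger_reward\": 1},\n    {\"input_ids\": [1, 3], \"labels\": [-100, 3], \"judger_reward\": -1},\n    {\"input_ids\": [4, 5], \"labels\": [-100, 5], \"judger_reward\": 1},\n    {\"input_ids\": [4, 6], \"labels\": [-100, 6], \"judger_reward\": 1},\n]\n\ndataset = TrajectoryDatasetWithFilter(repeat_k=2, only_keep_1_pair=True)\ndataset.update(trajectories)\n\n# The second prompt is filtered out (pass rate 1.0); the first contributes\n# exactly one correct/incorrect pair, each tagged with its group pass rate.\nassert len(dataset) == 2\nassert all(data[\"pass_rate\"] == 0.5 for data in dataset)\n"
  },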
  {
    "path": "oreal/judgers/__init__.py",
    "content": "# Copyright (c) InternLM. All rights reserved.\nfrom .base_judger import (\n    BaseJudger,\n    register_judger,\n    registered_judgers,\n)\nfrom .math_judger import MathJudger\nfrom .router import InputData, ParallelRouter\n\n__all__ = [\n    \"register_judger\",\n    \"registered_judgers\",\n    \"BaseJudger\",\n    \"MathJudger\",\n    \"InputData\",\n    \"ParallelRouter\",\n]\n"
  },
  {
    "path": "oreal/judgers/base_judger.py",
    "content": "# Copyright (c) InternLM. All rights reserved.\nfrom abc import ABC, abstractmethod\nfrom dataclasses import dataclass\nfrom typing import (\n    Dict,\n    Generic,\n    List,\n    Optional,\n    Type,\n    TypedDict,\n    TypeVar,\n    Union,\n)\n\nT = TypeVar(\"T\")\nMessageItem = TypedDict(\"MessageItem\", {\"role\": str, \"content\": str})\nReward = Union[float, List[float], None]\nMetaData = TypedDict(\"MetaData\", {\"data_source\": str})\n\n\n@dataclass\nclass JudgeStatus(Generic[T]):\n    ok: bool = True\n    reason: Optional[str] = None\n    handle: Optional[T] = None\n\n\nclass BaseJudger(ABC):\n    def __init__(self):\n        pass\n\n    @abstractmethod\n    def on_data_received(\n        self,\n        prompt_messages: List[MessageItem],\n        completion_messages: List[MessageItem],\n        metadata: dict,\n    ) -> JudgeStatus:\n        raise NotImplementedError()\n\n    @abstractmethod\n    def on_reward_required(\n        self,\n        status: JudgeStatus,\n        timeout: Optional[float] = None,\n    ) -> Reward:\n        raise NotImplementedError()\n\n\nregistered_judgers: Dict[str, Type[BaseJudger]] = {}\n\n\ndef register_judger(name: str):\n    global registered_judgers\n\n    def wrapper(cls):\n        assert name not in registered_judgers, f\"{name} already in {registered_judgers}\"\n        registered_judgers[name] = cls\n        return cls\n\n    return wrapper\n"
  },
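  {
    "path": "examples/custom_judger_demo.py",
    "content": "# NOTE: hypothetical sketch, not part of the original code; the judger name\n# and matching rule are fabricated. It shows the minimal contract for a custom\n# judger: implement the two hooks and register the class under a name that can\n# then be referenced from a judgers_config.\nfrom typing import List, Optional\n\nfrom oreal.judgers.base_judger import (\n    BaseJudger,\n    JudgeStatus,\n    MessageItem,\n    Reward,\n    register_judger,\n)\n\n\n@register_judger(\"echo_judger\")\nclass EchoJudger(BaseJudger):\n    \"\"\"Toy judger: reward 1.0 when the completion contains the gold answer.\"\"\"\n\n    def on_data_received(\n        self,\n        prompt_messages: List[MessageItem],\n        completion_messages: List[MessageItem],\n        metadata: dict,\n    ) -> JudgeStatus:\n        # Stage 1 may kick off slow work (HTTP calls, sandboxes) and stash a\n        # handle for later; here the check is cheap enough to do inline.\n        hit = metadata.get(\"gold_answer\", \"\") in completion_messages[-1][\"content\"]\n        return JudgeStatus(ok=True, handle=hit)\n\n    def on_reward_required(\n        self, status: JudgeStatus, timeout: Optional[float] = None\n    ) -> Reward:\n        # Stage 2 turns the stored handle into a scalar reward.\n        return 1.0 if status.handle else -1.0\n"
  },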
  {
    "path": "oreal/judgers/math_judger.py",
    "content": "# Copyright (c) InternLM. All rights reserved.\nimport random\nimport re\nimport time\nfrom typing import List, Optional, Tuple\n\nimport requests\n\nfrom .base_judger import BaseJudger, JudgeStatus, MessageItem, Reward, register_judger\nfrom .utils import extract_answer, math_equal\n\n\n@register_judger(\"math_judger\")\nclass MathJudger(BaseJudger):\n    verify_prompt = \"\"\"You are a helpful assistant who evaluates the correctness and quality of models' outputs.\n    Please as a grading expert, judge whether the final answers given by the candidates below are consistent with the standard answers, that is, whether the candidates answered correctly.\n\n    Here are some evaluation criteria:\n    1. Please refer to the given standard answer. You don't need to re-generate the answer to the question because the standard answer has been given. You only need to judge whether the candidate's answer is consistent with the standard answer according to the form of the question. Don't try to answer the original question. You can assume that the standard answer is definitely correct.\n    2. Because the candidate's answer may be different from the standard answer in the form of expression, before making a judgment, please understand the question and the standard answer first, and then judge whether the candidate's answer is correct, but be careful not to try to answer the original question.\n    3. Some answers may contain multiple items, such as multiple-choice questions, multiple-select questions, fill-in-the-blank questions, etc. As long as the answer is the same as the standard answer, it is enough. For multiple-select questions and multiple-blank fill-in-the-blank questions, the candidate needs to answer all the corresponding options or blanks correctly to be considered correct.\n    4. Some answers may be expressed in different ways, such as some answers may be a mathematical expression, some answers may be a textual description, as long as the meaning expressed is the same. And some formulas are expressed in different ways, but they are equivalent and correct.\n    5. If the prediction is given with \\\\boxed{{}}, please ignore the \\\\boxed{{}} and only judge whether the candidate's answer is consistent with the standard answer.\n\n    Please judge whether the following answers are consistent with the standard answer based on the above criteria. Grade the predicted answer of this new question as one of:\n    A: CORRECT\n    B: INCORRECT\n    Just return the letters \\\"A\\\" or \\\"B\\\", with no text around it.\n\n    Here is your task. Simply reply with either CORRECT, INCORRECT. 
Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.\n\n\n    <Original Question Begin>:\n    {question}\n    <Original Question End>\n\n\n    <Gold Target Begin>:\n    {gold_answer}\n    <Gold Target End>\n\n\n    <Predicted Answer Begin>:\n    {answer}\n    <Predicted End>\n\n\n    Judging the correctness of candidates' answers:\"\"\"\n\n    def __init__(\n        self,\n        hosts: List[str],\n        max_retries: int = 1,\n        retry_delay: float = 1.0,\n        stop_word=\"<|im_end|>\",\n        thinking_finish_words=[\"<conclude>\", \"**Final Answer**\", \"</think>\"],\n    ):\n        super().__init__()\n        self.hosts = hosts\n        self.max_retries = max_retries\n        self.retry_delay = retry_delay\n        self.stop_word = stop_word\n        self.thinking_finish_words = thinking_finish_words\n\n        self.host_ip_idx = random.randint(0, len(hosts) - 1)\n        self.model_name = requests.get(\n            f\"http://{self.hosts[self.host_ip_idx]}/v1/models\",\n            headers={\"Authorization\": \"Bearer \"},\n        ).json()[\"data\"][0][\"id\"]\n\n    def on_data_received(\n        self,\n        prompt_messages: List[MessageItem],\n        completion_messages: List[MessageItem],\n        metadata: dict,\n    ) -> JudgeStatus:\n        question = prompt_messages[-1][\"content\"]\n        response = completion_messages[-1][\"content\"]\n        question_type = metadata.get(\"question_type\", None)\n        gold_answer = metadata[\"gold_answer\"]\n        if not response.strip().endswith(self.stop_word):\n            # If the response does not end with the stop word, it is not a complete response, treat as incorrect\n            return JudgeStatus(\n                ok=True,\n                handle={\n                    \"question\": question,\n                    \"question_type\": question_type,\n                    \"response\": response,\n                    \"gold_answer\": gold_answer,\n                    \"verify_label\": False,\n                },\n            )\n\n        for thinking_finish_word in self.thinking_finish_words:\n            if thinking_finish_word in response:\n                response = response.split(thinking_finish_word)[-1]\n\n        response = response.replace(self.stop_word, \"\")\n\n        # first try to extract and verify with rule, if correct, return\n        extracted_answer, verify_label = self._extract_and_verify_with_logic(\n            response, gold_answer\n        )\n        if verify_label is True:\n            return JudgeStatus(\n                ok=True,\n                handle={\n                    \"question\": question,\n                    \"question_type\": question_type,\n                    \"response\": response,\n                    \"gold_answer\": gold_answer,\n                    \"verify_label\": verify_label,\n                },\n            )\n\n        # then try to evaluate with model\n        res_string, verify_label = self._evaluate_answer_with_llm(\n            question, question_type, response, gold_answer\n        )\n        return JudgeStatus(\n            ok=True,\n            handle={\n                \"question\": question,\n                \"question_type\": question_type,\n                \"response\": response,\n                \"gold_answer\": gold_answer,\n                \"verify_label\": verify_label,\n            },\n        )\n\n    def on_reward_required(\n        self, status: JudgeStatus, timeout: Optional[float] = None\n    ) 
-> Reward:\n        if status.handle is None:\n            return None\n        if status.handle[\"verify_label\"] is not None:\n            return 1.0 if status.handle[\"verify_label\"] else -1.0\n        return None\n\n    def _evaluate_answer_with_llm(\n        self, question: str, question_type: str, answer: str, gold_answer: str\n    ) -> Tuple[str, bool]:\n        for i in range(self.max_retries):\n            host = self.hosts[self.host_ip_idx]\n            self.host_ip_idx = (self.host_ip_idx + 1) % len(self.hosts)\n            prompt = self.verify_prompt.format(\n                \"\", \"\", question=question, answer=answer, gold_answer=gold_answer\n            )\n            try:\n                res = requests.post(\n                    f\"http://{host}/v1/chat/completions\",\n                    json={\n                        \"model\": self.model_name,\n                        \"messages\": [\n                            {\n                                \"role\": \"user\",\n                                \"content\": prompt,\n                            }\n                        ],\n                        \"temperature\": 0.0,\n                        \"top_p\": 0.8,\n                        \"top_k\": 20,\n                        \"repetition_penalty\": 1.05,\n                        \"max_tokens\": 100,\n                        \"stop\": [\"<|im_end|>\", \"<|endoftext|>\"],\n                    },\n                )\n                res_string = res.json()[\"choices\"][0][\"message\"][\"content\"]\n                print(f\"Evaluate result: {res_string}\")\n                verify_label = self._verify_from_string(res_string)\n                if verify_label is None:\n                    raise ValueError(\n                        f\"Evaluate result is None, judger prediction: {res_string}\"\n                    )\n                return res_string, verify_label\n\n            except Exception as e:\n                print(f\"Error verifying answer: {e}\")\n                time.sleep(self.retry_delay)\n                continue\n        print(f\"Failed to verify answer after {self.max_retries} retries.\")\n        return None, None\n\n    def _verify_from_string(self, verification: str):\n        if \"A\" in verification and \"B\" not in verification:\n            label = True\n        elif \"B\" in verification and \"A\" not in verification:\n            label = False\n        else:  # judger model failed to predict A or B\n            label = None\n        return label\n\n    def _extract_and_verify_with_logic(\n        self, response: str, gold_answer: str\n    ) -> Tuple[str, bool]:\n        extracted_answer = extract_answer(response)\n        verify_label = math_equal(extracted_answer, gold_answer)\n        return extracted_answer, verify_label\n"
  },
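  {
    "path": "examples/math_judger_demo.py",
    "content": "# NOTE: hypothetical sketch, not part of the original code. It drives\n# MathJudger directly (outside ParallelRouter) and assumes an OpenAI-compatible\n# judge model is already being served at 127.0.0.1:8000; the host, question and\n# rollout below are placeholders.\nfrom oreal.judgers import MathJudger\n\n# The constructor immediately queries /v1/models on one of the hosts to\n# discover the judge model name, so the server must be reachable.\njudger = MathJudger(hosts=[\"127.0.0.1:8000\"], max_retries=2)\n\nstatus = judger.on_data_received(\n    prompt_messages=[{\"role\": \"user\", \"content\": \"What is 2 + 2?\"}],\n    completion_messages=[\n        # A rollout must end with the stop word, otherwise the judger treats\n        # it as truncated and marks it incorrect without any checking.\n        {\"role\": \"assistant\", \"content\": \"The answer is \\\\boxed{4}<|im_end|>\"}\n    ],\n    metadata={\"gold_answer\": \"4\"},\n)\n# Rule-based matching already succeeds here, so no LLM call is needed\n# to turn the handle into a reward.\nprint(judger.on_reward_required(status))  # 1.0\n"
  },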
  {
    "path": "oreal/judgers/router.py",
    "content": "# Copyright (c) InternLM. All rights reserved.\nimport atexit\nimport functools\nimport os\nimport queue\nimport time\nimport traceback\nfrom collections import defaultdict\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom multiprocessing import Event, Process, Queue, connection\nfrom multiprocessing.synchronize import Event as EventClass\nfrom typing import (\n    Callable,\n    Dict,\n    Generic,\n    List,\n    Optional,\n    Tuple,\n    TypedDict,\n    TypeVar,\n    cast,\n)\nfrom uuid import uuid4\n\nimport loguru\nfrom typing_extensions import NotRequired\n\nfrom .base_judger import (\n    JudgeStatus,\n    MessageItem,\n    MetaData,\n    Reward,\n    registered_judgers,\n)\n\n\nclass InputData(TypedDict):\n    prompt_messages: List[MessageItem]\n    completion_messages: List[MessageItem]\n    metadata: NotRequired[MetaData]\n\n\nT = TypeVar(\"T\")\n\n\n@dataclass\nclass GenericTask(Generic[T]):\n    token: str\n    index: int\n    judger: str\n    content: T\n\n\n@dataclass\nclass SubprocessConfig:\n    loguru_handlers: Optional[List[dict]] = None\n    worker_init_func: Optional[Callable] = None\n\n\nclass ParallelRouter:\n    def __init__(\n        self,\n        judgers_config: Dict[str, dict],\n        data_judger_mapping: Dict[str, Optional[List[str]]],\n        logger: Optional[\"loguru.Logger\"] = None,\n        subprocess_config: Optional[SubprocessConfig] = None,\n    ):\n        if logger is not None:\n            self.logger = logger\n        else:\n            import mock\n\n            self.logger = mock.Mock()\n\n        if subprocess_config is not None:\n            self.subprocess_config = subprocess_config\n        else:\n            self.subprocess_config = SubprocessConfig()\n\n        if not (\n            isinstance(judgers_config, dict)\n            and all(\n                isinstance(k, str) and isinstance(v, dict)\n                for k, v in judgers_config.items()\n            )\n        ):\n            raise TypeError(\n                f\"Illegal judgers_config: {judgers_config}\\n\"\n                \"Should be Dict[str, dict]\"\n            )\n        if \"RM\" in judgers_config.keys():\n            raise KeyError(\n                f\"'RM' is a reserved judger keywork for {self.__class__.__name__}, \"\n                f\"please remove it from judgers_config: {judgers_config}\"\n            )\n        self.judgers_config = judgers_config\n\n        data_judger_mapping: Dict[str, List[str]] = {\n            k: v or [] for k, v in data_judger_mapping.items()\n        }  # change None to empty list []\n        if not (\n            isinstance(data_judger_mapping, dict)\n            and all(\n                isinstance(k, str)\n                and isinstance(v, (list, tuple, set))\n                and all(isinstance(vv, str) for vv in v)\n                for k, v in data_judger_mapping.items()\n            )\n        ):\n            raise TypeError(\n                f\"Illegal data_judger_mapping: {data_judger_mapping}\\n\"\n                \"Should be Dict[str, List[str]]\"\n            )\n        self.data_judger_mapping = data_judger_mapping\n\n        avail_judgers = set(self.judgers_config.keys()) | {\"RM\"}\n        _used_judgers: List[str] = []\n        for v in data_judger_mapping.values():\n            _used_judgers.extend(v)\n        used_judgers: set = set(_used_judgers)\n        if unused := avail_judgers - used_judgers:\n            self.logger.warning(\n                \"Following judgers are available but not 
\"\n                f\"used in data mapping: {unused}\\n\"\n                \"Please make sure this is intended\"\n            )\n            # remove unused configs\n            for judger_name in unused:\n                self.judgers_config.pop(judger_name, None)\n        if missing := used_judgers - avail_judgers:\n            self.logger.warning(\n                \"Following judgers are configured to be used \"\n                f\"but not built in data mapping: {missing}\\n\"\n                \"Please make sure this is intended\"\n            )\n            # remove missing judgers from mapping, to prevent potential errors\n            for source in list(self.data_judger_mapping.keys()):\n                before = set(self.data_judger_mapping[source])\n                self.data_judger_mapping[source] = list(before - missing)\n            # then filter out data_mapping without available judgers\n            self.data_judger_mapping = {\n                source: judgers\n                for source, judgers in self.data_judger_mapping.items()\n                if len(judgers) > 0\n            }\n\n        # Try build judgers in __init__ so that raise Exceptions earlly\n        for judger_name, judger_conf in self.judgers_config.items():\n            _ = self._build_judger(judger_name, judger_conf)\n\n        self._processes: List[Process] = []\n        self._stop_event = Event()\n        atexit.register(self.shutdown)\n\n        self._input_queues: Dict[str, Queue[GenericTask[InputData]]] = {\n            judger_name: Queue() for judger_name in self.judgers_config.keys()\n        }\n        self._output_queue: Queue[GenericTask[Reward]] = Queue()\n        self._exc_queue: Queue[Tuple[str, Exception]] = Queue()\n        self._num_tasks: Dict[str, int] = {}  # for each token\n        self._num_indexes: Dict[str, int] = {}  # for each token\n        self._results_buffer: Dict[str, List[GenericTask[Reward]]] = defaultdict(\n            list\n        )  # results buffer grouped by the key \"token\"\n\n    def submit(self, data_batch: List[InputData]):\n        indexes_for_ext: List[int] = []\n        indexes_for_local: List[int] = []\n        tasks_input: List[GenericTask[InputData]] = []\n        token = str(uuid4())\n        for index, data_item in enumerate(data_batch):\n            if (\n                not isinstance(data_item, dict)\n                or \"metadata\" not in data_item\n                or \"prompt_messages\" not in data_item\n                or \"completion_messages\" not in data_item\n            ):\n                indexes_for_local.append(index)\n                continue\n            source = data_item[\"metadata\"].get(\"data_source\", None)\n            if source is None or source not in self.data_judger_mapping:\n                indexes_for_local.append(index)\n                continue\n            indexes_for_ext.append(index)\n            for judger in self.data_judger_mapping[source]:\n                if judger == \"RM\":\n                    indexes_for_local.append(index)\n                else:\n                    tasks_input.append(\n                        GenericTask(\n                            token=token,\n                            index=index,\n                            judger=judger,\n                            content=data_item,\n                        )\n                    )\n\n        self._num_tasks[token] = len(tasks_input)\n        self._num_indexes[token] = len(data_batch)\n        for task in tasks_input:\n            
self._input_queues[task.judger].put(task, block=True, timeout=1)\n\n        if not self._processes:\n            self.logger.debug(\"Starting processes...\")\n            for judger_name, judger_conf in self.judgers_config.items():\n                num_proc = judger_conf.pop(\"num_processes\", 1)\n                self._processes.extend(\n                    [\n                        Process(\n                            target=ParallelRouter._safe_process_worker,\n                            kwargs={\n                                \"stop_event\": self._stop_event,\n                                \"judger_name\": judger_name,\n                                \"judger_conf\": judger_conf,\n                                \"input_queue\": self._input_queues[judger_name],\n                                \"output_queue\": self._output_queue,\n                                \"exc_queue\": self._exc_queue,\n                                \"config\": self.subprocess_config,\n                            },\n                            daemon=True,\n                        )\n                        for _ in range(num_proc)\n                    ]\n                )\n            for p in self._processes:\n                p.start()\n            self.logger.debug(f\"Start processes done, total {len(self._processes)}\")\n\n        return token, indexes_for_local\n\n    def query(\n        self, token: str, timeout: float = 0\n    ) -> Optional[List[Optional[Dict[str, Reward]]]]:\n        start = time.time()\n        while True:\n            self._try_catch_subprocess_exceptions()\n            try:\n                result = self._output_queue.get(timeout=0.1)\n                self._results_buffer[result.token].append(result)\n            except queue.Empty:\n                pass\n            if len(self._results_buffer[token]) == self._num_tasks[token]:\n                results = self._results_buffer.pop(token)\n                num_tasks = self._num_tasks.pop(token)\n                num_indexes = self._num_indexes.pop(token)\n                rewards: List[Dict[str, Reward]] = [{} for _ in range(num_indexes)]\n                for result in results:\n                    reward = result.content\n                    if result.judger in rewards[result.index]:\n                        self.logger.warning(\n                            f\"{result.judger} already exists: {rewards[result.index]}, \"\n                            f\"will replace --> {reward}\"\n                        )\n                    rewards[result.index][result.judger] = reward\n                # convert empty dicts to None\n                return [r or None for r in rewards]\n            if timeout > 0 and (time.time() - start) > timeout:\n                raise TimeoutError(\n                    f\"Timeout after {timeout} seconds, got {len(self._results_buffer[token])} results, expected {self._num_tasks[token]}\"\n                )\n\n    @staticmethod\n    def _safe_process_worker(\n        stop_event: EventClass,\n        judger_name: str,\n        judger_conf: dict,\n        input_queue: \"Queue[GenericTask[InputData]]\",\n        output_queue: \"Queue[GenericTask[Reward]]\",\n        exc_queue: \"Queue[Tuple[str, Exception]]\",\n        config: SubprocessConfig,\n    ):\n        try:\n            ParallelRouter._process_worker(\n                stop_event=stop_event,\n                judger_name=judger_name,\n                judger_conf=judger_conf,\n                input_queue=input_queue,\n                
output_queue=output_queue,\n                exc_queue=exc_queue,\n                config=config,\n            )\n        except Exception as e:\n            exc_queue.put((judger_name, e), timeout=1)\n\n    @staticmethod\n    def _process_worker(\n        stop_event: EventClass,\n        judger_name: str,\n        judger_conf: dict,\n        input_queue: \"Queue[GenericTask[InputData]]\",\n        output_queue: \"Queue[GenericTask[Reward]]\",\n        exc_queue: \"Queue[Tuple[str, Exception]]\",\n        config: SubprocessConfig,\n    ):\n        from xtuner._lite import get_logger\n\n        logger = get_logger()\n        if config.loguru_handlers is not None:\n            for handler in config.loguru_handlers:\n                handler[\"enqueue\"] = True\n                logger.add(*handler)\n        if config.worker_init_func is not None:\n            config.worker_init_func()\n\n        # Infer num threads for each stage according to configs\n        _num_threads = judger_conf.pop(\"concurrency_per_proc\", (1, 1))\n        if isinstance(_num_threads, (tuple, list)) and len(_num_threads) == 2:\n            num_threads_s1, num_threads_s2 = _num_threads\n        elif isinstance(_num_threads, int):\n            num_threads_s1 = max(1, _num_threads // 2)\n            num_threads_s2 = max(1, _num_threads - num_threads_s1)\n        else:\n            raise TypeError(\n                \"`concurrency_per_proc` in judger_conf should be int or \"\n                f\"Tuple[int, int], got {type(_num_threads)}: {_num_threads}\"\n            )\n\n        # Lazy build judgers in subprocesses to avoid serialization errors\n        judger = ParallelRouter._build_judger(judger_name, judger_conf)\n        # input_queue = self._input_queues[judger_name]\n        # output_queue = self._output_queue\n        handle_queue: queue.Queue[GenericTask[JudgeStatus]] = queue.Queue()\n        log_prefix = f\"[pid={os.getpid()},{judger_name}]\"\n\n        def report_exc_wrapper(func):\n            @functools.wraps(func)\n            def wrapper(*args, **kwargs):\n                try:\n                    return func(*args, **kwargs)\n                except Exception as e:\n                    stack_trace = traceback.format_exc()\n                    logger.error(\n                        f\"{log_prefix} \"\n                        f\"Thread worker of {judger_name} raised \"\n                        f\"{type(e).__name__}: {e}\",\n                        f\"Stack trace: {stack_trace}\",\n                    )\n                    exc_queue.put((judger_name, e), timeout=1)\n\n            return wrapper\n\n        # Stage 1: input_queue -> judger.on_data_received -> handle_queue\n        @report_exc_wrapper\n        def thread_worker_s1():\n            while not stop_event.is_set():\n                try:\n                    task = input_queue.get(timeout=0.1)\n                    logger.debug(f\"{log_prefix} dequeue input: {task}\")\n                except queue.Empty:\n                    logger.debug(f\"{log_prefix} input queue empty\")\n                    time.sleep(0.1)\n                    continue\n                data = task.content\n                if \"metadata\" not in data:\n                    raise RuntimeError(\n                        f\"'metadata' not in data.keys(): {list(data.keys())}\"\n                    )\n                logger.debug(f\"{log_prefix} on_data_received\")\n                handle = judger.on_data_received(\n                    data[\"prompt_messages\"],\n                    
data[\"completion_messages\"],\n                    cast(dict, data[\"metadata\"]),\n                )\n                logger.debug(f\"{log_prefix} got handle\")\n                new_task = GenericTask(\n                    token=task.token,\n                    index=task.index,\n                    judger=task.judger,\n                    content=handle,\n                )\n                while True:\n                    try:\n                        handle_queue.put(\n                            new_task,\n                            timeout=0.1,\n                        )\n                        logger.debug(f\"{log_prefix} enqueue handle: {new_task}\")\n                        break\n                    except queue.Full:\n                        time.sleep(0.1)\n\n        # Stage 2: handle_queue -> judger.on_reward_required -> output_queue\n        @report_exc_wrapper\n        def thread_worker_s2():\n            while not stop_event.is_set():\n                try:\n                    task = handle_queue.get(timeout=0.1)\n                    logger.debug(f\"{log_prefix} dequeue handle: {task}\")\n                except queue.Empty:\n                    logger.debug(f\"{log_prefix} handle queue empty\")\n                    time.sleep(0.1)\n                    continue\n                logger.debug(f\"{log_prefix} on_reward_required\")\n                reward = judger.on_reward_required(task.content)\n                logger.info(f\"{log_prefix} got result\")\n                new_task = GenericTask(\n                    token=task.token,\n                    index=task.index,\n                    judger=task.judger,\n                    content=reward,\n                )\n                while True:\n                    try:\n                        output_queue.put(\n                            new_task,\n                            timeout=0.1,\n                        )\n                        logger.debug(f\"{log_prefix} enqueue output: {new_task}\")\n                        break\n                    except queue.Full:\n                        time.sleep(0.1)\n\n        from threading import Thread\n\n        threads: List[Thread] = []\n        for _ in range(num_threads_s1):\n            threads.append(Thread(target=thread_worker_s1, daemon=True))\n        for _ in range(num_threads_s2):\n            threads.append(Thread(target=thread_worker_s2, daemon=True))\n        for t in threads:\n            t.start()\n        for t in threads:\n            t.join()\n\n    @staticmethod\n    def _build_judger(judger_name: str, judger_conf: dict):\n        judger_conf = deepcopy(judger_conf)\n        judger_conf.pop(\"num_processes\", None)\n        judger_conf.pop(\"concurrency_per_proc\", None)\n        _type = judger_conf.pop(\"type\", None)\n        if _type is None:\n            _type = judger_name\n        if _type not in registered_judgers:\n            raise KeyError(\n                f\"{judger_name} use unregistered judger type: {_type}. 
\"\n                f\"Available judgers are: {list(registered_judgers.keys())}\"\n            )\n        cls = registered_judgers[_type]\n        return cls(**judger_conf)\n\n    def _try_catch_subprocess_exceptions(self):\n        exc_handles: List[Tuple[str, Exception]] = []\n        while True:\n            try:\n                exc_handle = self._exc_queue.get(timeout=0.001)\n                exc_handles.append(exc_handle)\n            except queue.Empty:\n                break\n        if exc_handles:\n            error_message = \"\\n\".join(\n                [\n                    f\"- [{judger_name}] {type(exc).__name__}: {exc}\"\n                    for judger_name, exc in exc_handles\n                ]\n            )\n            raise RuntimeError(\n                \"Following threads/processes raise exceptions unexpectedly:\\n\"\n                f\"{error_message}\\n\"\n                \"Program terminated\"\n            )\n\n    def shutdown(self, timeout: float = 2.0):\n        if not hasattr(self, \"_processes\") or not self._processes:\n            return\n        if not self._stop_event.is_set():\n            self._stop_event.set()\n        connection.wait([p.sentinel for p in self._processes], timeout=timeout)\n        for p in self._processes:\n            if p.is_alive():\n                p.kill()\n                p.join()\n        self._processes = []\n"
  },
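  {
    "path": "examples/router_demo.py",
    "content": "# NOTE: hypothetical sketch, not part of the original code; the host, the\n# \"math\" data source name and the sample below are placeholders. It wires one\n# MathJudger behind ParallelRouter and blocks until the rewards arrive.\nfrom typing import List\n\nfrom oreal.judgers import InputData, ParallelRouter\n\nrouter = ParallelRouter(\n    judgers_config={\n        # the key doubles as the registered judger type when no explicit\n        # \"type\" is given; the remaining keys are judger kwargs\n        \"math_judger\": {\"hosts\": [\"127.0.0.1:8000\"], \"num_processes\": 2},\n    },\n    # every sample whose metadata[\"data_source\"] == \"math\" goes to math_judger\n    data_judger_mapping={\"math\": [\"math_judger\"]},\n)\n\nbatch: List[InputData] = [\n    {\n        \"prompt_messages\": [{\"role\": \"user\", \"content\": \"What is 2 + 2?\"}],\n        \"completion_messages\": [\n            {\"role\": \"assistant\", \"content\": \"\\\\boxed{4}<|im_end|>\"}\n        ],\n        \"metadata\": {\"data_source\": \"math\", \"gold_answer\": \"4\"},\n    }\n]\n\ntoken, local_indexes = router.submit(batch)\n# query() blocks until every subprocess task for this token has reported;\n# rewards[i] maps judger name -> reward, or is None when index i was routed\n# locally or skipped.\nrewards = router.query(token, timeout=300)\nprint(rewards)  # e.g. [{\"math_judger\": 1.0}]\nrouter.shutdown()\n"
  },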
  {
    "path": "oreal/judgers/utils.py",
    "content": "# flake8: noqa\n# isort: skip_file\n\nimport multiprocessing\nimport re\nfrom math import isclose\nfrom typing import Optional, Union\nfrom collections import defaultdict, Counter\n\nfrom sympy import N, simplify\nfrom sympy.parsing.latex import parse_latex\nfrom sympy.parsing.sympy_parser import parse_expr\n\n\ndef extract_answer(pred_str: str, execute: bool = False) -> str:\n    if re.search(\"\\\\boxed|boxed|\\\\box|box\", pred_str):\n        answer = re.split(\"\\\\boxed|boxed|\\\\box|box\", pred_str)[-1]\n        if len(answer) == 0:\n            return \"\"\n        elif answer[0] == \"{\":\n            stack = 1\n            a = \"\"\n            for c in answer[1:]:\n                if c == \"{\":\n                    stack += 1\n                    a += c\n                elif c == \"}\":\n                    stack -= 1\n                    if stack == 0:\n                        break\n                    a += c\n                else:\n                    a += c\n        else:\n            a = answer.split(\"$\")[0].strip()\n    elif re.search(\"[Tt]he (final )?answer is:?\", pred_str):\n        a = re.split(\"[Tt]he (final )?answer is:?\", pred_str)[-1].strip().rstrip(\".\")\n    else:  # use the last number\n        pred = re.findall(r\"-?\\d*\\.?\\d+\", pred_str.replace(\",\", \"\"))\n        if len(pred) >= 1:\n            a = pred[-1]\n        else:\n            a = \"\"\n    choice = re.findall(r\"([A-E]):\\s*(.*)\", a)\n    if len(choice) > 0:\n        for option, content in choice:\n            a = option\n    choice = re.findall(r\"\\(([A-E])\\)\\s*(.*)\", a)\n    if len(choice) > 0:\n        for option, content in choice:\n            a = option\n\n    a = re.split(r\"=|\\\\approx|≈\", a)[-1]\n\n    # multiple lines\n    answer = \"\"\n    preds = re.split(\"\\n\", a)\n    for pred in preds:\n        if \"\\\\begin{align\" in pred or pred.endswith(\":\"):\n            continue\n        if pred != \"\" and pred[0] == \":\":\n            pred = pred[1:]\n        if pred != \"\" and pred[-1] == \".\":\n            pred = pred[:-1]\n        if pred != \"\" and pred[-1] == \"/\":\n            pred = pred[:-1]\n        pred = strip_string(pred)\n        pred = re.sub(r\"^[a-zA-Z0-9]+[\\)]\\s*\", \"\", pred)\n        for p in pred.split(\"{}\"):\n            if p != \"\":\n                pred = p\n                break\n\n        pred = re.sub(r\"^\\{([A-Z])\\}|\\(([A-Z])\\)\", r\"\\1\\2\", pred)\n        if pred != \"\":\n            answer = pred\n            break\n    return answer\n\n\ndef _fix_fracs(string):\n    substrs = string.split(\"\\\\frac\")\n    new_str = substrs[0]\n    if len(substrs) > 1:\n        substrs = substrs[1:]\n        for substr in substrs:\n            new_str += \"\\\\frac\"\n            if len(substr) > 0 and substr[0] == \"{\":\n                new_str += substr\n            else:\n                try:\n                    assert len(substr) >= 2\n                except Exception:\n                    return string\n                a = substr[0]\n                b = substr[1]\n                if b != \"{\":\n                    if len(substr) > 2:\n                        post_substr = substr[2:]\n                        new_str += \"{\" + a + \"}{\" + b + \"}\" + post_substr\n                    else:\n                        new_str += \"{\" + a + \"}{\" + b + \"}\"\n                else:\n                    if len(substr) > 2:\n                        post_substr = substr[2:]\n                        new_str += \"{\" + a + 
\"}\" + b + post_substr\n                    else:\n                        new_str += \"{\" + a + \"}\" + b\n    string = new_str\n    return string\n\n\ndef _fix_a_slash_b(string):\n    if len(string.split(\"/\")) != 2:\n        return string\n    a = string.split(\"/\")[0]\n    b = string.split(\"/\")[1]\n    try:\n        if \"sqrt\" not in a:\n            a = int(a)\n        if \"sqrt\" not in b:\n            b = int(b)\n        assert string == f\"{a}/{b}\"\n        new_string = \"\\\\frac{\" + str(a) + \"}{\" + str(b) + \"}\"\n        return new_string\n    except Exception:\n        return string\n\n\ndef _fix_sqrt(string):\n    _string = re.sub(r\"\\\\sqrt(\\w+)\", r\"\\\\sqrt{\\1}\", string)\n    return _string\n\n\ndef strip_string(string):\n    string = str(string).strip()\n    # linebreaks\n    string = string.replace(\"\\n\", \"\")\n\n    # right \".\"\n    string = string.rstrip(\".\")\n\n    # remove inverse spaces\n    string = string.replace(\"\\\\!\", \"\")\n    string = string.replace(\"\\\\ \", \"\")\n\n    # replace \\\\ with \\\n    string = string.replace(\"\\\\\\\\\", \"\\\\\")\n    string = string.replace(\"\\\\\\\\\", \"\\\\\")\n\n    # replace tfrac and dfrac with frac\n    string = string.replace(\"tfrac\", \"frac\")\n    string = string.replace(\"dfrac\", \"frac\")\n\n    # remove \\left and \\right\n    string = string.replace(\"\\\\left\", \"\")\n    string = string.replace(\"\\\\right\", \"\")\n\n    # Remove unit: miles, dollars if after is not none\n    _string = re.sub(r\"\\\\text{.*?}$\", \"\", string).strip()\n    if _string != \"\" and _string != string:\n        # print(\"Warning: unit not removed: '{}' -> '{}'\".format(string, _string))\n        string = _string\n\n    # Remove circ (degrees)\n    string = string.replace(\"^{\\\\circ}\", \"\")\n    string = string.replace(\"^\\\\circ\", \"\")\n\n    # remove dollar signs\n    string = string.replace(\"\\\\$\", \"\")\n    string = string.replace(\"$\", \"\")\n\n    string = string.replace(\"\\\\text\", \"\")\n    string = string.replace(\"x\\\\in\", \"\")\n\n    # remove percentage\n    string = string.replace(\"\\\\%\", \"\")\n    string = string.replace(r\"\\%\", \"\")\n    string = string.replace(\"%\", \"\")\n\n    # \" 0.\" equivalent to \" .\" and \"{0.\" equivalent to \"{.\" Alternatively, add \"0\" if \".\" is the start of the string\n    string = string.replace(\" .\", \" 0.\")\n    string = string.replace(\"{.\", \"{0.\")\n\n    # cdot\n    string = string.replace(\"\\\\cdot\", \"\")\n\n    # inf\n    string = string.replace(\"infinity\", \"\\\\infty\")\n    if \"\\\\infty\" not in string:\n        string = string.replace(\"inf\", \"\\\\infty\")\n    string = string.replace(\"+\\\\inity\", \"\\\\infty\")\n\n    # and\n    string = string.replace(\"and\", \"\")\n    string = string.replace(\"\\\\mathbf\", \"\")\n\n    # use regex to remove \\mbox{...}\n    string = re.sub(r\"\\\\mbox{.*?}\", \"\", string)\n\n    # quote\n    string.replace(\"'\", \"\")\n    string.replace('\"', \"\")\n\n    # i, j\n    if \"j\" in string and \"i\" not in string:\n        string = string.replace(\"j\", \"i\")\n\n    # replace a.000b where b is not number or b is end, with ab, use regex\n    string = re.sub(r\"(\\d+)\\.0+([^\\d])\", r\"\\1\\2\", string)\n    string = re.sub(r\"(\\d+)\\.0+$\", r\"\\1\", string)\n\n    # if empty, return empty string\n    if len(string) == 0:\n        return string\n    if string[0] == \".\":\n        string = \"0\" + string\n\n    # to consider: get rid of e.g. 
\"k = \" or \"q = \" at beginning\n    if len(string.split(\"=\")) == 2:\n        if len(string.split(\"=\")[0]) <= 2:\n            string = string.split(\"=\")[1]\n\n    string = _fix_sqrt(string)\n    string = string.replace(\" \", \"\")\n\n    # \\frac1b or \\frac12 --> \\frac{1}{b} and \\frac{1}{2}, etc. Even works with \\frac1{72} (but not \\frac{72}1). Also does a/b --> \\\\frac{a}{b}\n    string = _fix_fracs(string)\n\n    # NOTE: X/Y changed to \\frac{X}{Y} in dataset, but in simple cases fix in case the model output is X/Y\n    string = _fix_a_slash_b(string)\n\n    return string\n\n\ndef last_boxed_only_string(string):\n    idx = string.rfind(\"\\\\boxed\")\n    if idx < 0:\n        idx = string.rfind(\"\\\\fbox\")\n        if idx < 0:\n            return None\n\n    i = idx\n    right_brace_idx = None\n    num_left_braces_open = 0\n    while i < len(string):\n        if string[i] == \"{\":\n            num_left_braces_open += 1\n        if string[i] == \"}\":\n            num_left_braces_open -= 1\n            if num_left_braces_open == 0:\n                right_brace_idx = i\n                break\n        i += 1\n\n    if right_brace_idx is None:\n        retval = None\n    else:\n        retval = string[idx : right_brace_idx + 1]\n\n    return retval\n\n\ndef extract_answer(pred_str: str, execute: bool = False) -> str:\n    if re.search(\"\\boxed|boxed\", pred_str):\n        answer = re.split(\"\\boxed|boxed\", pred_str)[-1]\n        if len(answer) == 0:\n            return \"\"\n        elif answer[0] == \"{\":\n            stack = 1\n            a = \"\"\n            for c in answer[1:]:\n                if c == \"{\":\n                    stack += 1\n                    a += c\n                elif c == \"}\":\n                    stack -= 1\n                    if stack == 0:\n                        break\n                    a += c\n                else:\n                    a += c\n        else:\n            a = answer.split(\"$\")[0].strip()\n    elif re.search(\"[Tt]he (final )?answer is:?\", pred_str):\n        a = re.split(\"[Tt]he (final )?answer is:?\", pred_str)[-1].strip().rstrip(\".\")\n    elif pred_str.startswith(\"```python\") and execute:\n        # fall back to program\n        from lagent import get_tool\n\n        a = get_tool(\"IPythonInteractive\").exec(pred_str).value or \"\"\n    else:  # use the last number\n        pred = re.findall(r\"-?\\d*\\.?\\d+\", pred_str.replace(\",\", \"\"))\n        if len(pred) >= 1:\n            a = pred[-1]\n        else:\n            a = \"\"\n    # multiple lines\n    pred = a.split(\"\\n\")[0]\n    if pred != \"\" and pred[0] == \":\":\n        pred = pred[1:]\n    if pred != \"\" and pred[-1] == \".\":\n        pred = pred[:-1]\n    if pred != \"\" and pred[-1] == \"/\":\n        pred = pred[:-1]\n    pred = strip_string(pred)\n    return pred\n\n\ndef is_digit(s):\n    try:\n        float(str(s).replace(\",\", \"\"))\n        return True\n    except ValueError:\n        return False\n\n\ndef math_equal(\n    prediction: Union[bool, float, str],\n    reference: Union[float, str],\n    include_percentage: bool = True,\n    is_close: bool = True,\n    tolerance: float = 1e-4,\n    timeout: bool = False,\n) -> bool:\n    \"\"\"Exact match of math if and only if:\n\n    1. numerical equal: both can convert to float and are equal\n    2. symbolic equal: both can convert to sympy expression and are equal\n    \"\"\"\n    try:  # 1. 
numerical equal\n        if is_digit(prediction) and is_digit(reference):\n            prediction = float(str(prediction).replace(\",\", \"\"))\n            reference = float(str(reference).replace(\",\", \"\"))\n            # number questions\n            if include_percentage:\n                gt_result = [reference / 100, reference, reference * 100]\n            else:\n                gt_result = [reference]\n            for item in gt_result:\n                try:\n                    if is_close:\n                        if isclose(item, prediction, rel_tol=tolerance):\n                            return True\n                    else:\n                        if item == prediction:\n                            return True\n                except Exception:\n                    continue\n            return False\n    except Exception:\n        pass\n\n    if not prediction and prediction not in [0, False]:\n        return False\n\n    # 2. symbolic equal\n    reference = str(reference).strip()\n    prediction = str(prediction).strip()\n\n    ## deal with [], (), {}\n    pred_str, ref_str = prediction, reference\n    if (\n        prediction.startswith(\"[\")\n        and prediction.endswith(\"]\")\n        and not reference.startswith(\"(\")\n    ) or (\n        prediction.startswith(\"(\")\n        and prediction.endswith(\")\")\n        and not reference.startswith(\"[\")\n    ):\n        pred_str = pred_str.strip(\"[]()\")\n        ref_str = ref_str.strip(\"[]()\")\n    for s in [\"{\", \"}\", \"(\", \")\"]:\n        ref_str = ref_str.replace(s, \"\")\n        pred_str = pred_str.replace(s, \"\")\n    if pred_str == ref_str:\n        return True\n\n    ## [a, b] vs. [c, d], return a==c and b==d\n    if (\n        (prediction.startswith(\"[\") and prediction.endswith(\"]\"))\n        and (reference.startswith(\"[\") and reference.endswith(\"]\"))\n        or (prediction.startswith(\"(\") and prediction.endswith(\")\"))\n        and (reference.startswith(\"(\") and reference.endswith(\")\"))\n    ):\n        pred_parts = prediction[1:-1].split(\",\")\n        ref_parts = reference[1:-1].split(\",\")\n        if len(pred_parts) == len(ref_parts):\n            if all(\n                [\n                    math_equal(\n                        pred_parts[i], ref_parts[i], include_percentage, is_close\n                    )\n                    for i in range(len(pred_parts))\n                ]\n            ):\n                return True\n\n    # symbolic equal with sympy\n    if timeout:\n        if call_with_timeout(symbolic_equal_process, prediction, reference):\n            return True\n    else:\n        if symbolic_equal(prediction, reference):\n            return True\n\n    return False\n\n\ndef math_equal_process(param):\n    return math_equal(param[-2], param[-1])\n\n\ndef math_equal_process_v2(param):\n    if param[-2] is None:\n        return False\n    return math_equal(param[-2], param[-1])\n\n\ndef symbolic_equal(a, b):\n\n    def _parse(s):\n        for f in [parse_latex, parse_expr]:\n            try:\n                return f(s)\n            except Exception:\n                pass\n        return s\n\n    a = _parse(a)\n    b = _parse(b)\n\n    try:\n        if simplify(a - b) == 0:\n            return True\n    except Exception:\n        pass\n\n    try:\n        if isclose(N(a), N(b), rel_tol=1e-3):\n            return True\n    except Exception:\n        pass\n    return False\n\n\ndef symbolic_equal_process(a, b, output_queue):\n    result = symbolic_equal(a, b)\n 
   output_queue.put(result)\n\n\ndef call_with_timeout(func, *args, timeout=1, **kwargs):\n    output_queue = multiprocessing.Queue()\n    process_args = args + (output_queue,)\n    process = multiprocessing.Process(target=func, args=process_args, kwargs=kwargs)\n    process.start()\n    process.join(timeout)\n\n    if process.is_alive():\n        process.terminate()\n        process.join()\n        return False\n\n    return output_queue.get()\n\n\ndef math_majority_vote(answers: list, majority: Optional[int] = None):\n    ans2cnt, ans2idx = Counter(), defaultdict(list)\n    for i, ans in enumerate(answers):\n        if isinstance(ans, str) and ans.strip():\n            for key in ans2cnt.keys():\n                if math_equal(ans, key):\n                    ans2cnt[key] += 1\n                    ans2idx[key].append(i)\n                    break\n            else:\n                ans2cnt[ans] += 1\n                ans2idx[ans].append(i)\n    if ans2cnt:\n        maj, cnt = ans2cnt.most_common(1)[0]\n        if maj and cnt >= (majority or 1):\n            return maj, ans2idx[maj]\n    return None, []\n"
  },
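  {
    "path": "examples/math_verify_demo.py",
    "content": "# NOTE: hypothetical sketch, not part of the original code; it exercises the\n# rule-based helpers on toy strings. Like utils.py itself, it needs sympy, and\n# parse_latex additionally needs the antlr4 python runtime.\nfrom oreal.judgers.utils import extract_answer, math_equal, math_majority_vote\n\npred = \"So the final answer is \\\\boxed{\\\\frac{1}{2}}\"\nans = extract_answer(pred)\nprint(ans)                     # \\frac{1}{2}\nprint(math_equal(ans, \"0.5\"))  # True: numeric/symbolic equivalence, not string match\nprint(math_equal(ans, \"1/3\"))  # False\n\n# Majority vote over sampled answers, grouping math-equal answers together:\n# \"1/2\" and \"0.5\" form one bucket of size 2, which meets the threshold.\nmaj, idxs = math_majority_vote([\"1/2\", \"0.5\", \"2\"], majority=2)\nprint(maj, idxs)               # 1/2 [0, 1]\n"
  },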
  {
    "path": "oreal/utils.py",
    "content": "import importlib.util\nimport os\nimport types\n\n\nclass ConfigDict(dict):\n\n    def __getattr__(self, item):\n        if item in self:\n            return self[item]\n        raise AttributeError(f\"'ConfigDict' object has no attribute '{item}'\")\n\n    def __setattr__(self, key, value):\n        self[key] = value\n\n\nclass Config:\n\n    @staticmethod\n    def fromfile(file_path):\n        config_dict = ConfigDict()\n        if not os.path.isfile(file_path):\n            raise FileNotFoundError(f\"Config file not found: {file_path}\")\n\n        # Load the configuration file as a module\n        spec = importlib.util.spec_from_file_location(\"config_module\", file_path)\n        config_module = importlib.util.module_from_spec(spec)\n        spec.loader.exec_module(config_module)\n\n        # Function to convert nested dictionaries to ConfigDict recursively\n        def convert_to_config_dict(d):\n            if isinstance(d, dict):\n\n                config_dict = ConfigDict()\n                for key, value in d.items():\n                    if isinstance(value, dict):\n                        config_dict[key] = convert_to_config_dict(value)\n                    else:\n                        config_dict[key] = value\n                return config_dict\n            else:\n                return d\n\n        # Retrieve all attributes (variables) from the module\n        for attribute_name in dir(config_module):\n            if not attribute_name.startswith(\"__\"):\n                config_dict[attribute_name] = convert_to_config_dict(\n                    getattr(config_module, attribute_name)\n                )\n        for key, value in list(config_dict.items()):\n            if isinstance(value, (types.FunctionType, types.ModuleType)):\n                config_dict.pop(key)\n        return config_dict\n"
  },
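  {
    "path": "examples/config_demo.py",
    "content": "# NOTE: hypothetical sketch, not part of the original code. Config.fromfile\n# executes a plain Python file and exposes its top-level variables with\n# attribute access; the toy config below is written to a temp file only so\n# the demo is self-contained.\nimport tempfile\n\nfrom oreal.utils import Config\n\ncfg_src = \"\"\"\nwork_dir = \"work_dirs/demo\"\nseed = 42\ntrain = dict(lr=1e-5, optimizer=dict(type=\"AdamW\"))\n\"\"\"\n\nwith tempfile.NamedTemporaryFile(\"w\", suffix=\".py\", delete=False) as f:\n    f.write(cfg_src)\n    path = f.name\n\nargs = Config.fromfile(path)\nprint(args.seed)                  # 42\nprint(args.train.optimizer.type)  # AdamW: nested dicts become ConfigDict\n"
  },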
  {
    "path": "requirements.text",
    "content": "fire\nflash-attn\ntorch>=2.5.0\nxtuner[all]==0.2.0rc0\n"
  },
  {
    "path": "train_oreal.py",
    "content": "# Copyright (c) InternLM. All rights reserved.\nimport json\nimport os\nimport sys\nimport time\nfrom collections import OrderedDict\nfrom contextlib import nullcontext\nfrom datetime import datetime, timedelta\n\nimport fire\nimport torch\nimport torch.distributed as dist\nfrom mmengine import mkdir_or_exist\nfrom mmengine.runner import set_random_seed\nfrom mmengine.utils import get_git_hash\nfrom mmengine.utils.dl_utils import collect_env\nfrom torch.nn import functional as F\nfrom torch.optim import AdamW\nfrom torch.optim.lr_scheduler import CosineAnnealingLR, LambdaLR\nfrom torch.utils.data import DataLoader\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\nfrom transformers.utils.import_utils import is_flash_attn_2_available\nfrom xtuner._lite import get_device, get_logger, get_torch_device_module\nfrom xtuner._lite.accelerate import profile_time_and_memory, unpack_sequence\nfrom xtuner._lite.algorithms.sft import SftCollator\nfrom xtuner._lite.modelings import register_remote_code\nfrom xtuner._lite.parallel import (\n    ParallelSampler,\n    setup_parallel,\n    split_for_sequence_parallel,\n)\nfrom xtuner._lite.patches import AutoPatch, FSDPConfig\nfrom xtuner._lite.patches.utils import pad_to_max_length, pad_to_multiple_of\n\nfrom oreal.datasets import (\n    InferDataset,\n    OrealPromptDataset,\n    PromptCollator,\n    TrajectoryCollator,\n    TrajectoryDataset,\n    TrajectoryDatasetWithFilter,\n)\nfrom oreal.judgers import ParallelRouter\nfrom oreal.utils import Config\n\nlogger = get_logger()\n\nDEVICE = get_device()\nDEVICE_MODULE = get_torch_device_module()\n\n\ntorch._dynamo.config.cache_size_limit = 16384\n\n\nclass RLParallelSampler(ParallelSampler):\n    def __iter__(self):\n        \"\"\"Iterate the indices.\"\"\"\n        # deterministically shuffle based on epoch and seed\n        if self.shuffle:\n            g = torch.Generator()\n            g.manual_seed(self.seed + self.epoch)\n            indices = torch.randperm(len(self.dataset), generator=g).tolist()\n        else:\n            indices = torch.arange(len(self.dataset)).tolist()\n\n        # add extra samples to make it evenly divisible\n        if self.round_up:\n            indices = (indices * int(self.total_size / len(indices) + 1))[\n                : self.total_size\n            ]\n\n        # subsample\n        chunk_size = len(indices) // self.world_size\n        start = self.rank * chunk_size\n        end = start + chunk_size\n        indices = indices[start:end]\n\n        return iter(indices[self.step :])\n\n\ndef log_format(rank, debug=False):\n\n    formatter = f\"[XTuner][RANK {rank}]\"\n    formatter += \"[{time:YYYY-MM-DD HH:mm:ss}][<level>{level}</level>]\"\n\n    if debug:\n        formatter += \"[<cyan>{name}</cyan>:\"\n        formatter += \"<cyan>{function}</cyan>:\"\n        formatter += \"<cyan>{line}</cyan>]\"\n\n    formatter += \" <level>{message}</level>\"\n    return formatter\n\n\ndef is_interval(step, total_steps, interval):\n    return (step + 1) % interval == 0 or (step + 1) == total_steps\n\n\ndef reduce_mean(data, group):\n    data_tensor = torch.tensor(data, device=DEVICE)\n    dist.all_reduce(data_tensor, op=dist.ReduceOp.AVG, group=group)\n    return data_tensor.item()\n\n\ndef threshold_rescale(prob, threshold=0.5):\n    prob = prob - threshold\n    prob = prob / (1 - threshold)\n    prob = prob.clamp(0, 1)\n    return prob\n\n\ndef topk_rescale(prob, topk_ratio=0.5):\n    topk_num = int(prob.numel() * topk_ratio)\n    values, indices = 
torch.topk(prob, topk_num)\n    result = torch.zeros_like(prob)\n    if values.max() != values.min():\n        normalized_values = (values - values.min()) / (values.max() - values.min())\n    else:\n        normalized_values = torch.ones_like(values)\n    result[indices] = normalized_values\n    return result\n\n\ndef train_oreal(cfg_path, **kwargs):\n    args = Config.fromfile(cfg_path)\n    args.update(kwargs)\n\n    ###########################################################################\n    #                           1. Environment                                #\n    ###########################################################################\n    register_remote_code()\n\n    setup_parallel()\n    set_random_seed(args.seed)\n\n    rank = dist.get_rank()\n\n    timestamp = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n\n    objects = [timestamp]\n    dist.broadcast_object_list(objects, src=0)\n    timestamp = objects[0]\n\n    args.work_dir = os.path.join(args.work_dir, timestamp)\n    mkdir_or_exist(args.work_dir)\n\n    log_file = os.path.join(args.work_dir, f\"rank{rank}.log\")\n\n    # Change the log format printed in the terminal\n    lvl = \"DEBUG\" if args.debug else \"INFO\"\n    logger.remove()\n    logger.add(sys.stderr, level=lvl, format=log_format(rank, args.debug))\n    # Change the format saved in the log file\n    logger.add(log_file, format=log_format(rank), backtrace=True, catch=True)\n\n    logger.info(args)\n    if rank == 0:\n        env = collect_env()\n        import transformers\n        import xtuner\n\n        env[\"Transformers\"] = transformers.__version__\n        env[\"XTuner\"] = f\"{xtuner.__version__}+{get_git_hash(digits=6)}\"\n        runtime_env = OrderedDict()\n        runtime_env.update(env)\n        runtime_env[\"Seed\"] = args.seed\n        runtime_env[\"World Size\"] = dist.get_world_size()\n\n        runtime_env_info = \"\\n    \" + \"\\n    \".join(f\"{k}: {v}\" for k, v in runtime_env.items())\n        dash_line = \"-\" * 60\n        logger.info(\"\\n\" + dash_line + \"\\nRuntime environment:\" + runtime_env_info + \"\\n\" + dash_line + \"\\n\")\n    # -------------------    Environment  End  ------------------------------ #\n\n    ###########################################################################\n    #                          3. 
FSDP                                        #\n    ###########################################################################\n    if args.dtype == \"auto\":\n        args.dtype = \"bf16\" if DEVICE_MODULE.is_bf16_supported() else \"fp16\"\n\n    if args.dtype == \"fp16\":\n        dtype = torch.float16\n    elif args.dtype == \"bf16\":\n        if DEVICE_MODULE.is_bf16_supported():\n            dtype = torch.bfloat16\n        else:\n            raise RuntimeError(\"The device does not support `bf16`, \" \"please set `dtype` to `fp16`.\")\n    else:\n        raise RuntimeError(\"`dtype` only supports `fp16`, `bf16` or `auto`, \" f\"but found {args.dtype}.\")\n\n    with torch.device(\"meta\"):\n        # In order to save CPU memory and GPU memory,\n        # initialize an empty complete model on all ranks first.\n        # At the same time, a non-empty complete model will be loaded\n        # on the CPU of rank0.\n        # After the model is parallelized, the parameters of the complete\n        # model on rank0 will be loaded.\n        actor_model = AutoModelForCausalLM.from_pretrained(args.actor, attn_implementation=\"flash_attention_2\", torch_dtype=dtype)\n\n        for module in actor_model.modules():\n            for p_name, param in module.named_parameters(recurse=False):\n                if param.requires_grad:\n                    param_fp32 = torch.nn.Parameter(param.to(dtype=torch.float32))\n                    setattr(module, p_name, param_fp32)\n\n        ref_model = AutoModelForCausalLM.from_pretrained(args.reference, attn_implementation=\"flash_attention_2\", torch_dtype=dtype)\n\n        for param in ref_model.parameters():\n            param.requires_grad = False\n\n        if args.token_level_rm is not None:\n            token_level_rm = AutoModelForCausalLM.from_pretrained(\n                args.token_level_rm, attn_implementation=\"flash_attention_2\", torch_dtype=dtype\n            )\n            # replace the language model head with a reward model linear head\n            token_level_rm.lm_head = torch.nn.Linear(token_level_rm.config.hidden_size, 1, bias=False)\n\n            for module in token_level_rm.modules():\n                for p_name, param in module.named_parameters(recurse=False):\n                    if param.requires_grad:\n                        # Ensure all numerical values in the optimizer are fp32.\n                        # Don't worry about speed, FSDP will use `dtype`\n                        # during forward.\n                        param_fp32 = torch.nn.Parameter(param.to(dtype=torch.float32))\n                        setattr(module, p_name, param_fp32)\n\n    with profile_time_and_memory(\"[Parallelize Actor]\"):\n        actor_model = AutoPatch.from_causal_lm(\n            actor_model,\n            fsdp_config=FSDPConfig(\n                tp_size=args.tp_size,\n                sp_size=args.sp_size,\n                param_dtype=dtype,\n                reduce_dtype=dtype,\n                cpu_offload=args.cpu_offload,\n                reshard_after_forward=False,\n                mesh_prefix=\"actor\",\n            ),\n        )\n    dist.barrier()\n\n    with profile_time_and_memory(\"[Parallelize Reference]\"):\n        ref_model = AutoPatch.from_causal_lm(\n            ref_model,\n            fsdp_config=FSDPConfig(\n                tp_size=args.tp_size,\n                sp_size=args.sp_size,\n                param_dtype=dtype,\n                reduce_dtype=dtype,\n                cpu_offload=args.cpu_offload,\n                
with profile_time_and_memory(\"[Parallelize Reference]\"):\n        ref_model = AutoPatch.from_causal_lm(\n            ref_model,\n            fsdp_config=FSDPConfig(\n                tp_size=args.tp_size,\n                sp_size=args.sp_size,\n                param_dtype=dtype,\n                reduce_dtype=dtype,\n                cpu_offload=args.cpu_offload,\n                reshard_after_forward=True,\n                mesh_prefix=\"ref\",\n            ),\n        )\n    dist.barrier()\n\n    if args.token_level_rm is not None:\n        with profile_time_and_memory(\"[Parallelize Reward]\"):\n            token_level_rm = AutoPatch.from_causal_lm(\n                token_level_rm,\n                fsdp_config=FSDPConfig(\n                    tp_size=args.tp_size,\n                    sp_size=args.sp_size,\n                    param_dtype=dtype,\n                    reduce_dtype=dtype,\n                    cpu_offload=args.cpu_offload,\n                    reshard_after_forward=True,\n                    mesh_prefix=\"reward\",\n                ),\n            )\n            token_level_rm.train()\n    dist.barrier()\n    # --------------------------    FSDP  End  ------------------------------ #\n\n    ###########################################################################\n    #                     3. Dataset & Dataloader                             #\n    ###########################################################################\n    actor_sp_mesh = actor_model.sequence_parallel_mesh\n    actor_dp_mesh = actor_model.data_parallel_mesh\n    actor_data_mesh = actor_model.data_mesh\n    actor_dp_size = actor_dp_mesh.size()\n\n    actor_sp_size = actor_sp_mesh.size()\n\n    prompt_global_batch = args.gen_global_batch // args.prompt_repeat_k\n\n    tokenizer = AutoTokenizer.from_pretrained(args.actor, trust_remote_code=True, padding_side=\"right\")\n\n    if args.chat_template is not None:\n        if rank == 0:\n            logger.info(f\"[CHAT_TEMPLATE] {args.chat_template}\")\n        tokenizer.chat_template = args.chat_template\n\n    stop_token_ids = []\n    word_ids = tokenizer.encode(args.stop_word, add_special_tokens=False)\n    if len(word_ids) > 1:\n        raise NotImplementedError(\"The stop word must be a single token.\")\n    stop_token_ids.append(word_ids[0])\n\n    with profile_time_and_memory(\"[Dataset & Dataloader]\"):\n\n        prompt_dataset = OrealPromptDataset(\n            args.datasets,\n            tokenizer,\n            difficulty_balance_cfg=args.data_difficulty_balance_cfg,\n        )\n        if rank == 0:\n            logger.info(f\"[Dataset] {len(prompt_dataset)} prompts.\")\n\n        assert is_flash_attn_2_available()\n        prompt_collator = PromptCollator(pack_batch=True)\n        prompt_sampler = ParallelSampler(prompt_dataset, actor_dp_mesh, prompt_global_batch, shuffle=True)\n\n        prompt_dataloader = DataLoader(\n            prompt_dataset,\n            batch_size=prompt_global_batch // actor_dp_mesh.size(),\n            num_workers=args.num_workers,\n            # If you swap in a custom sampler, make sure it rounds up or drops\n            # the last batch according to the global batch size.\n            sampler=prompt_sampler,\n            collate_fn=prompt_collator,\n            persistent_workers=args.num_workers > 0,\n        )\n\n        if rank == 0:\n            logger.info(f\"[Dataloader] {len(prompt_dataloader)} batches.\")\n            _first_batch = [prompt_dataset[i] for i in range(prompt_global_batch)]\n            logger.debug(f\"[Dataloader] Training Batch:\\n{_first_batch}\")\n\n    dist.barrier()\n    # -------------------    Dataset & Dataloader  End  --------------------- #\n\n    # ---------------------    Router  Start  ------------------------------- #\n    
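# Rewards are collected asynchronously: completions are submitted to the\n    # judgers right after generation and the results are queried only after\n    # the log-prob inference pass, so judging overlaps with local compute.\n    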
judger_router = ParallelRouter(\n        judgers_config=args.judgers_config,\n        data_judger_mapping=args.data_judger_mapping,\n        logger=logger,\n    )\n\n    ###########################################################################\n    #                      4. Optimizer & Scheduler                           #\n    ###########################################################################\n    actor_params = [p for p in actor_model.parameters() if p.requires_grad]\n    actor_optimizer = AdamW(actor_params, lr=args.actor_lr, weight_decay=args.wd)\n\n    if args.token_level_rm is not None:\n        token_rm_params = [p for p in token_level_rm.parameters() if p.requires_grad]\n        token_rm_optimizer = AdamW(token_rm_params, lr=args.token_level_rm_lr, weight_decay=args.wd)\n\n    total_steps = args.total_steps\n    if total_steps > len(prompt_dataloader):\n        logger.warning(f\"Total steps {total_steps} exceeds the number of prompt batches {len(prompt_dataloader)}, clipping to the dataloader length.\")\n        total_steps = len(prompt_dataloader)\n\n    warmup_steps = args.warmup_steps\n    rm_warmup_steps = args.get(\"rm_warmup_steps\", warmup_steps)\n    lr_min = args.get(\"actor_min_lr\", args.actor_lr)\n    token_level_rm_lr_min = args.get(\"token_level_rm_lr_min\", args.token_level_rm_lr)\n\n    if args.checkpoint_interval == -1:\n        checkpoint_interval = total_steps\n    elif args.checkpoint_interval < 1:\n        checkpoint_interval = int(total_steps * args.checkpoint_interval)\n    else:\n        checkpoint_interval = int(args.checkpoint_interval)\n\n    
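# Learning-rate schedule: linear warmup from 0 to the base lr over\n    # `warmup_steps`, then cosine decay towards the configured minimum over\n    # the remaining `total_steps - warmup_steps` steps.\n    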
def warmup_fn(x):\n        return x / warmup_steps if x < warmup_steps else 1\n\n    warmup_scheduler = LambdaLR(actor_optimizer, warmup_fn)\n    cosine_scheduler = CosineAnnealingLR(actor_optimizer, T_max=total_steps - warmup_steps, eta_min=lr_min)\n\n    if args.token_level_rm is not None:\n\n        def rm_warmup_fn(x):\n            return x / rm_warmup_steps if x < rm_warmup_steps else 1\n\n        token_rm_warmup_scheduler = LambdaLR(token_rm_optimizer, rm_warmup_fn)\n        token_rm_cosine_scheduler = CosineAnnealingLR(token_rm_optimizer, T_max=total_steps - rm_warmup_steps, eta_min=token_level_rm_lr_min)\n\n    # ----------------    Optimizer & Scheduler End   ----------------------- #\n\n    ###########################################################################\n    #                          5. Training                                    #\n    ###########################################################################\n\n    if args.filter_trajectory:\n        trajectory_dataset = TrajectoryDatasetWithFilter(repeat_k=args.prompt_repeat_k)\n    else:\n        trajectory_dataset = TrajectoryDataset()\n\n    prompt_iterator = iter(prompt_dataloader)\n\n    start_step = 0\n    start_train_t = time.time()\n    DEVICE_MODULE.empty_cache()\n    DEVICE_MODULE.reset_peak_memory_stats()\n    max_memory = DEVICE_MODULE.max_memory_allocated()\n    logger.info(\"[Train] Begin Train Loop. The current GPU memory is \" f\"{(max_memory / 1024**3):.1f}GB\")\n\n    for step in range(start_step, total_steps):\n\n        if step <= warmup_steps:\n            warmup_scheduler.step()\n            cur_lr = warmup_scheduler.get_last_lr()[0]\n            if args.token_level_rm is not None:\n                token_rm_warmup_scheduler.step()\n                token_rm_cur_lr = token_rm_warmup_scheduler.get_last_lr()[0]\n        else:\n            cosine_scheduler.step()\n            cur_lr = cosine_scheduler.get_last_lr()[0]\n            if args.token_level_rm is not None:\n                token_rm_cosine_scheduler.step()\n                token_rm_cur_lr = token_rm_cosine_scheduler.get_last_lr()[0]\n\n        DEVICE_MODULE.reset_peak_memory_stats()\n\n        step_kl_penalty_loss = 0\n        step_rl_loss = 0\n        step_token_level_rm_loss = 0\n        step_start_t = time.time()\n        step_positive_loss = 0\n        step_negative_loss = 0\n\n        if step < args.actor_freeze_steps:\n            # Only update the parameters of the token-level reward model\n            update_actor = False\n        else:\n            update_actor = True\n\n        data = next(prompt_iterator)\n        prompt_input_ids = unpack_sequence(data[\"input_ids\"].to(DEVICE), data[\"num_tokens\"])\n        infer_num_tokens = data[\"num_tokens\"].to(DEVICE)\n        # repeat each prompt k times\n        prompt_input_ids = [p for p in prompt_input_ids for _ in range(args.prompt_repeat_k)]  # AAAABBBBCCCC\n        infer_num_tokens = torch.Tensor([n for n in infer_num_tokens for _ in range(args.prompt_repeat_k)])\n        message_data = [m for m in data[\"message_data\"] for _ in range(args.prompt_repeat_k)]\n        metadata = [m for m in data[\"metadata\"] for _ in range(args.prompt_repeat_k)]\n\n        
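# Layout example with prompt_repeat_k=4 and prompts [A, B, C]: the\n        # repeated batch is [A, A, A, A, B, B, B, B, C, C, C, C], so a later\n        # `reshape(-1, prompt_repeat_k)` groups the k samples of each prompt\n        # into one row for reward shaping.\n\n        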
# Stage 1,  Actor Model Generation\n        step_avg_new_tokens = 0\n        step_gen_start_t = time.time()\n\n        actor_model.eval()\n        # During the generation stage, sequence parallelism is not used even\n        # when the sp size is greater than 1; each sp rank processes\n        # different prompts in parallel.\n        responses = actor_model.generate(\n            prompt_input_ids,\n            stop_token_ids,\n            max_length=args.gen_max_length,\n            max_batch_size=len(prompt_input_ids),\n            max_prefill_batch=args.max_prefill_batch,\n            max_new_tokens=args.gen_max_new,\n            do_sample=args.gen_do_sample,\n            top_k=args.gen_top_k,\n            top_p=args.gen_top_p,\n            temperature=args.temperature,\n            cuda_graph=args.cuda_graph,\n        )\n\n        # decode responses\n        response_texts = [tokenizer.decode(res, skip_special_tokens=False) for res in responses]\n\n        actor_model.train()\n        dist.barrier()\n\n        step_avg_new_tokens = sum([len(res) for res in responses]) / len(responses)\n        step_gen_time = time.time() - step_gen_start_t\n\n        prompt_input_ids = [p[0].tolist() for p in prompt_input_ids]\n\n        # Stage 2,  Infer\n        step_infer_start_t = time.time()\n        step_infer_consumed_tokens = 0\n\n        # submit to judger\n        if actor_data_mesh.get_local_rank() == 0:\n            submit_batch = []\n            for i in range(len(message_data)):\n                submit_batch.append(\n                    {\n                        \"prompt_messages\": message_data[i],\n                        \"completion_messages\": [{\"role\": \"assistant\", \"content\": response_texts[i]}],\n                        \"metadata\": metadata[i],\n                    }\n                )\n            token, indexes_for_local = judger_router.submit(submit_batch)\n\n        # `infer_dataset` differs across dp ranks, so there is no need for\n        # the parallel sampler.\n        infer_dataset = InferDataset(\n            prompt_input_ids,\n            responses,\n            message_data,\n            metadata,\n        )\n        infer_dataloader = DataLoader(\n            infer_dataset,\n            batch_size=args.rl_mirco_batch,\n            num_workers=0,\n            collate_fn=SftCollator(pack_batch=True),\n            shuffle=False,\n            persistent_workers=False,\n        )\n\n        policies = []\n        for infer_packed_seq in infer_dataloader:\n            # labels are already shifted in InferDataset\n            infer_labels = infer_packed_seq[\"labels\"].to(DEVICE)\n            infer_input_ids = infer_packed_seq[\"input_ids\"].to(DEVICE)\n            infer_num_tokens = infer_packed_seq[\"num_tokens\"].to(DEVICE)\n            infer_batch_size = infer_num_tokens.numel()\n\n            step_infer_consumed_tokens += infer_num_tokens.sum() / actor_data_mesh.size()\n\n            unpacked_input_ids = unpack_sequence(infer_input_ids, infer_num_tokens, dim=1)\n            unpacked_labels = unpack_sequence(infer_labels, infer_num_tokens, dim=1)\n\n            for i in range(infer_batch_size):\n                assert unpacked_input_ids[i].numel() == infer_num_tokens[i]\n                assert unpacked_labels[i].numel() == infer_num_tokens[i]\n\n                _policy = {\n                    \"input_ids\": unpacked_input_ids[i].flatten().tolist(),\n                    \"labels\": unpacked_labels[i].flatten().tolist(),\n                    \"num_tokens\": infer_num_tokens[i].item(),\n                }\n                _policy[\"sequence_text\"] = tokenizer.decode(_policy[\"input_ids\"], skip_special_tokens=False)\n                policies.append(_policy)\n\n        step_infer_time = time.time() - step_infer_start_t\n\n        # ------------------------------------------------------------- #\n        # --------------------------Get Judger Reward------------------ #\n        # ------------------------------------------------------------- #\n        # query results from judger\n        if actor_data_mesh.get_local_rank() == 0:\n            while True:\n                try:\n                    judger_results = judger_router.query(token, timeout=3)\n                    logger.info(f\"Query judger results: {judger_results}\")\n                    break\n                except TimeoutError as e:\n                    logger.info(f\"Judger query timeout: {e}. Will retry.\")\n            judger_rewards = [list(r.values())[0] for r in judger_results]\n            judger_rewards = [r if r is not None else -1.0 for r in judger_rewards]\n            judger_rewards = torch.tensor(judger_rewards, dtype=torch.float32).to(DEVICE)\n        else:\n            judger_rewards = torch.tensor([0] * len(policies), dtype=torch.float32).to(DEVICE)\n\n        dist.barrier()\n        # Rewards are non-zero only on local rank 0 of each data mesh, so a\n        # SUM all-reduce acts as a broadcast within the mesh.\n        dist.all_reduce(judger_rewards, op=dist.ReduceOp.SUM, group=actor_data_mesh.get_group())\n\n        
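# Worked example for the shaping below with prompt_repeat_k=4 and\n        # rewards [1, 0, 0, 1] for one prompt: RLOO gives the first sample the\n        # baseline (0 + 0 + 1) / 3 = 1/3 and hence the advantage 1 - 1/3 = 2/3,\n        # while GRPO gives it (1 - mean) / (std + 1e-8) with mean = 0.5.\n        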
# reward shaping: normalize rewards with RLOO or GRPO\n        _rewards = judger_rewards.reshape(-1, args.prompt_repeat_k).T\n        if args.reward_shaping_type == \"rloo\":\n            baseline = (_rewards.sum(0) - _rewards) / (args.prompt_repeat_k - 1)\n            judger_advantages = _rewards - baseline\n        elif args.reward_shaping_type == \"grpo\":\n            judger_advantages = (_rewards - _rewards.mean(0)) / (_rewards.std(0) + 1e-8)\n        else:\n            raise NotImplementedError(f\"Reward shaping type {args.reward_shaping_type} is not implemented.\")\n        judger_advantages = judger_advantages.T.flatten()\n        # update policies\n        assert len(judger_rewards) == len(policies)\n        for i in range(len(policies)):\n            policies[i][\"judger_reward\"] = judger_rewards[i].item()\n            policies[i][\"judger_advantage\"] = judger_advantages[i].item()\n\n        # ------------------------------------------------------------- #\n        # --------------------------Stage 4, RL------------------------ #\n        # ------------------------------------------------------------- #\n        step_rl_start_t = time.time()\n\n        _global_policies = [None] * actor_dp_size\n        dist.all_gather_object(_global_policies, policies, actor_dp_mesh.get_group())\n\n        global_policies = []\n        for _rank_policies in _global_policies:\n            global_policies.extend(_rank_policies)\n\n        trajectory_dataset.update(global_policies)\n        if rank == 0:\n            # dump trajectories\n            _buffer_dir = os.path.join(args.work_dir, \"trajectories\")\n            mkdir_or_exist(_buffer_dir)\n            _buffer_file = os.path.join(_buffer_dir, f\"step.{step}.jsonl\")\n            trajectory_dataset.dump_jsonl(_buffer_file, tokenizer, args.debug)\n            _buffer_log_file = os.path.join(_buffer_dir, f\"step.{step}.log\")\n            trajectory_dataset.dump_log(_buffer_log_file, tokenizer, args.debug)\n\n        rl_global_batch = args.rl_global_batch\n        if args.filter_trajectory:\n            _world_size = actor_dp_mesh.size()\n            _data_size = len(trajectory_dataset)\n            # keep the RL global batch divisible by the dp world size\n            rl_global_batch = _data_size // _world_size * _world_size\n\n        rl_loader = DataLoader(\n            trajectory_dataset,\n            batch_size=args.rl_mirco_batch,\n            num_workers=0,\n            collate_fn=TrajectoryCollator(pack_batch=True),\n            shuffle=False,\n            sampler=RLParallelSampler(trajectory_dataset, actor_dp_mesh, rl_global_batch, shuffle=False),\n            persistent_workers=False,\n        )\n\n        
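# Normalization note: every per-token loss term below is scaled by\n        # `actor_dp_size` divided by the corresponding global token count, so\n        # after gradients are averaged across the dp group the update matches\n        # a single global per-token mean over all ranks' trajectories.\n        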
# Count the total number of tokens used for RL training across all ranks.\n        # This is necessary for the `per-token` loss; otherwise the number of\n        # tokens contributing to each backward pass would be unbalanced.\n        global_action_tokens = trajectory_dataset.num_action_tokens\n        global_positive_tokens = sum(\n            [(torch.tensor(t[\"labels\"]) >= 0).sum().item() for t in trajectory_dataset._trajectories if t[\"judger_reward\"] > 0]\n        )\n        global_negative_tokens = global_action_tokens - global_positive_tokens\n        global_num_seqs = len(trajectory_dataset._trajectories)\n\n        step_avg_judger_reward = sum([t[\"judger_reward\"] for t in global_policies]) / len(global_policies)\n        step_sum_gen_entropy = 0\n        step_sum_ref_kl = 0\n        step_action_tokens = 0\n        step_rl_consumed_tokens = 0\n\n        step_sum_adv = 0\n\n        for packed_policy in rl_loader:\n\n            rl_input_ids = packed_policy[\"input_ids\"].to(DEVICE)\n            rl_num_tokens = packed_policy[\"num_tokens\"].to(DEVICE)\n            assert rl_input_ids.numel() == rl_num_tokens.sum()\n            rl_batch_size = rl_num_tokens.numel()\n            # labels are already shifted in InferDataset\n            rl_labels = packed_policy[\"labels\"].to(DEVICE)\n\n            judger_rewards = torch.Tensor(packed_policy[\"judger_rewards\"]).to(DEVICE)  # shape: (rl_mirco_batch, )\n            judger_advantages = torch.Tensor(packed_policy[\"judger_advantages\"]).to(DEVICE)  # shape: (rl_mirco_batch, )\n\n            actor_input_ids = rl_input_ids.clone()\n            actor_labels = rl_labels.clone()\n            actor_num_tokens = rl_num_tokens.clone().tolist()\n\n            
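# Packed (varlen) attention example: for two sequences of lengths\n            # [3, 2], cu_seq_lens is [0, 3, 5] and position_ids restart per\n            # sequence as [0, 1, 2, 0, 1]; that is exactly what the cumsum and\n            # arange below construct.\n            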
actor_cu_seq_lens = torch.cumsum(torch.IntTensor([0] + actor_num_tokens), dim=0).to(DEVICE).int()\n            actor_position_ids = [torch.arange(num) for num in actor_num_tokens]\n            actor_position_ids = torch.cat(actor_position_ids, dim=0).to(DEVICE).unsqueeze_(0)\n\n            with nullcontext() if update_actor else torch.no_grad():\n                packed_actor_logits = actor_model(\n                    input_ids=actor_input_ids,\n                    position_ids=actor_position_ids,\n                    use_cache=False,\n                    cu_seq_lens_q=actor_cu_seq_lens,\n                    cu_seq_lens_k=actor_cu_seq_lens,\n                    max_length_q=max(actor_num_tokens),\n                    max_length_k=max(actor_num_tokens),\n                    sequence_parallel_mesh=actor_sp_mesh,\n                ).logits\n\n            # -------sft loss--------\n            # Calculate the SFT loss on each sp/tp rank and gather the\n            # per-token losses afterwards; gathering the full logits could\n            # cause OOM.\n            if actor_model.fsdp_config.torch_compile:\n                _actor_labels = pad_to_max_length(actor_labels, -100, actor_model.fsdp_config.max_length, 1)\n            else:\n                if actor_sp_mesh and actor_sp_mesh.size() > 1:\n                    multiple_of = actor_sp_mesh.size() * actor_model.tp_mesh.size()\n                else:\n                    multiple_of = actor_model.tp_mesh.size()\n                _actor_labels = pad_to_multiple_of(actor_labels, -100, multiple_of, 1)\n\n            if actor_sp_mesh and actor_sp_mesh.size() > 1:\n                _actor_labels = split_for_sequence_parallel(_actor_labels, dim=1, sp_mesh=actor_sp_mesh)\n\n            if actor_model.tp_mesh.size() > 1:\n                _actor_labels = split_for_sequence_parallel(_actor_labels, dim=1, sp_mesh=actor_model.tp_mesh)\n            packed_sft_loss = F.cross_entropy(packed_actor_logits.squeeze(), _actor_labels.squeeze(), reduction=\"none\").unsqueeze(\n                0\n            )  # shape: 1, seqlen\n\n            if actor_model.tp_mesh.size() > 1:\n                _packed_sft_loss = dist.nn.all_gather(packed_sft_loss, group=actor_model.tp_mesh.get_group())\n                packed_sft_loss = torch.cat(_packed_sft_loss, dim=1)\n\n            if actor_sp_mesh and actor_sp_mesh.size() > 1:\n                _packed_sft_loss = dist.nn.all_gather(packed_sft_loss, group=actor_sp_mesh.get_group())\n                packed_sft_loss = torch.cat(_packed_sft_loss, dim=1)\n\n            packed_sft_loss = packed_sft_loss[:, : actor_labels.size(1)]\n\n            # The labels of prefill tokens and the last token are -100.\n            # HACK: (for sp) the -100 entries take the value 0 here and are\n            # masked out later.\n            
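# `clip(0)` only turns the -100 labels into valid gather indices; the\n            # log-probs gathered at those positions are discarded by the\n            # action-token slice further below.\n            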
packed_logprobs = actor_model.gather_logprobs(packed_actor_logits, actor_labels.clip(0), actor_sp_mesh)\n\n            logprobs = unpack_sequence(packed_logprobs, actor_num_tokens, dim=1)\n            sft_loss = unpack_sequence(packed_sft_loss, actor_num_tokens, dim=1)\n\n            ref_input_ids = rl_input_ids.clone()\n            ref_labels = rl_labels.clone()\n            ref_num_tokens = rl_num_tokens.clone().tolist()\n\n            ref_cu_seq_lens = torch.cumsum(torch.IntTensor([0] + ref_num_tokens), dim=0).to(DEVICE).int()\n            ref_position_ids = [torch.arange(num) for num in ref_num_tokens]\n            ref_position_ids = torch.cat(ref_position_ids, dim=0).to(DEVICE).unsqueeze_(0)\n\n            with torch.no_grad():\n                packed_ref_logits = ref_model(\n                    input_ids=ref_input_ids,\n                    position_ids=ref_position_ids,\n                    use_cache=False,\n                    cu_seq_lens_q=ref_cu_seq_lens,\n                    cu_seq_lens_k=ref_cu_seq_lens,\n                    max_length_q=max(ref_num_tokens),\n                    max_length_k=max(ref_num_tokens),\n                    sequence_parallel_mesh=actor_sp_mesh,\n                ).logits\n\n            if args.token_level_rm is not None:\n                packed_rm_logits = token_level_rm(\n                    input_ids=ref_input_ids,\n                    position_ids=ref_position_ids,\n                    use_cache=False,\n                    cu_seq_lens_q=ref_cu_seq_lens,\n                    cu_seq_lens_k=ref_cu_seq_lens,\n                    max_length_q=max(ref_num_tokens),\n                    max_length_k=max(ref_num_tokens),\n                    sequence_parallel_mesh=actor_sp_mesh,\n                ).logits\n                # the reward head has a single output channel; take it as the\n                # per-token score\n                packed_rm_logits = packed_rm_logits[:, :, 0]  # TODO: replace with auto path rm\n                if token_level_rm.tp_mesh.size() > 1:\n                    _packed_rm_logits = dist.nn.all_gather(packed_rm_logits, group=token_level_rm.tp_mesh.get_group())\n                    packed_rm_logits = torch.cat(_packed_rm_logits, dim=1)\n                if actor_sp_mesh and actor_sp_mesh.size() > 1:\n                    _packed_rm_logits = dist.nn.all_gather(packed_rm_logits, group=actor_sp_mesh.get_group())\n                    packed_rm_logits = torch.cat(_packed_rm_logits, dim=1)\n                packed_rm_logits = packed_rm_logits[:, : actor_labels.size(1)]\n                rm_logits = unpack_sequence(packed_rm_logits, ref_num_tokens, dim=1)\n\n            # The labels of prefill tokens and the last token are -100.\n            # HACK: (for sp) the -100 entries take the value 0 here and are\n            # masked out later.\n            packed_ref_logprobs = ref_model.gather_logprobs(packed_ref_logits, ref_labels.clip(0), actor_sp_mesh)\n            ref_logprobs = unpack_sequence(packed_ref_logprobs, ref_num_tokens, dim=1)\n            unpacked_labels = unpack_sequence(rl_labels, rl_num_tokens, dim=1)\n\n            _positive_losses = []\n            _negative_losses = []\n            _kl_penalty_losses = []\n            _token_level_rm_losses = []\n            _losses = []\n            for i in range(rl_batch_size):\n                _judger_reward = judger_rewards[i]\n                assert unpacked_labels[i].numel() == rl_num_tokens[i]\n                # action tokens run from the last prefill token to the\n                # second-to-last token (the EOS token is excluded)\n                _num_action_tokens = (unpacked_labels[i] >= 0).sum()\n\n                _logprobs = logprobs[i][0, -_num_action_tokens - 1 : -1]\n                _ref_logprobs = ref_logprobs[i][0, -_num_action_tokens - 1 : -1]\n\n                _old_logprobs = _logprobs.detach()\n                _judger_advantages = judger_advantages[i]\n\n                if args.token_level_rm is not None:\n                    # compute cumulative mean of rm scores\n                    _rm_scores = rm_logits[i][0, -_num_action_tokens - 1 : -1]\n                    _cum_mean_rm_scores = _rm_scores.cumsum(0).squeeze() / torch.arange(1, _num_action_tokens + 1).to(DEVICE)\n                    _seq_mean_rm_scores = _rm_scores.mean()\n\n                    # ----------token level rm loss (cross entropy)------------\n                    _rm_label = torch.tensor([int(max(_judger_reward, 0))]).to(DEVICE)\n                    _seq_mean_rm_scores = _seq_mean_rm_scores.reshape(_rm_label.shape)\n                    _token_level_rm_loss = F.binary_cross_entropy_with_logits(_seq_mean_rm_scores.float(), _rm_label.float(), reduction=\"none\")\n                    _token_level_rm_loss = _token_level_rm_loss.sum() * actor_dp_size / global_num_seqs\n                    _token_level_rm_losses.append(_token_level_rm_loss)\n\n                    # use probability to reweight policy loss\n                    _correct_prob = torch.sigmoid(_cum_mean_rm_scores).detach()\n                    _incorrect_prob = 1 - _correct_prob\n\n                    if args.get(\"threshold_rescale\", False):\n                        correct_threshold = args.get(\"correct_threshold\", 0.5)\n                        incorrect_threshold = args.get(\"incorrect_threshold\", 0.5)\n                        _pos_weight = threshold_rescale(_correct_prob, correct_threshold)\n                        _neg_weight = threshold_rescale(_incorrect_prob, incorrect_threshold)\n                    elif args.get(\"topk_rescale\", False):\n                        correct_topk_ratio = args.get(\"correct_topk_ratio\", 0.5)\n                        incorrect_topk_ratio = args.get(\"incorrect_topk_ratio\", 0.5)\n                        _pos_weight = topk_rescale(_correct_prob, correct_topk_ratio)\n                        _neg_weight = topk_rescale(_incorrect_prob, incorrect_topk_ratio)\n                    else:\n                        raise NotImplementedError(\"Only `threshold_rescale` and `topk_rescale` are supported.\")\n                else:\n                    _pos_weight, _neg_weight = 1.0, 1.0\n\n                
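# The objective below is asymmetric: correct samples receive a\n                # (token-reweighted) behavior-cloning loss, incorrect samples\n                # receive a policy-gradient loss, and both are regularized by\n                # a KL penalty towards the reference model.\n                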
# ----------positive loss (behavior cloning)------------\n                _positive_loss = sft_loss[i][0, -_num_action_tokens - 1 : -1]\n                _positive_loss = (_positive_loss * _pos_weight).sum()\n                if args.get(\"pos_mult_adv\", False):\n                    _positive_loss = _positive_loss * _judger_advantages\n                if _judger_reward > 0:\n                    _positive_loss = _positive_loss * actor_dp_size / global_positive_tokens * args.positive_loss_factor\n                else:\n                    # negative samples do not need the SFT loss\n                    _positive_loss = torch.zeros_like(_positive_loss)\n                _positive_losses.append(_positive_loss)\n\n                # ----------negative loss (policy gradient)------------\n                if _judger_reward > 0:\n                    # positive samples do not need the policy-gradient loss\n                    _negative_loss = torch.zeros_like(_positive_loss)\n                    _negative_losses.append(_negative_loss)\n                else:\n                    _advantages = _judger_advantages * _neg_weight\n                    # exp(logp - logp.detach()) equals 1 in value but its\n                    # gradient is that of logp, i.e. REINFORCE weighted by\n                    # the advantage\n                    _negative_loss = torch.exp(_logprobs - _old_logprobs) * _advantages\n                    _negative_loss = -torch.sum(_negative_loss) * actor_dp_size / global_negative_tokens * args.negative_loss_factor\n                    _negative_losses.append(_negative_loss)\n\n                
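# KL penalty variants, with r = ref_logprob - logprob per action token:\n                #   \"kl\":     r                (plain log-ratio penalty)\n                #   \"unbias\": exp(r) - r - 1   (non-negative, cf. the low-variance \"k3\" estimator)\n                #   \"mse\":    r ** 2 / 2       (squared log-ratio penalty)\n                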
# ----------compute kl penalty------------\n                assert _logprobs.ndim == 1\n                kl_type = args.get(\"kl_type\", \"unbias\")  # kl, unbias, mse\n                if kl_type == \"kl\":\n                    kl = _ref_logprobs - _logprobs\n                    _kl_penalty_loss = (args.kl_coef * kl).sum() * actor_dp_size / global_action_tokens\n                elif kl_type == \"unbias\":\n                    kl = _ref_logprobs - _logprobs\n                    nonneg_nobias_kl = torch.exp(kl) - kl - 1\n                    _kl_penalty_loss = (args.kl_coef * nonneg_nobias_kl).sum() * actor_dp_size / global_action_tokens\n                elif kl_type == \"mse\":\n                    _kl_penalty_loss = (\n                        (args.kl_coef * (_ref_logprobs - _logprobs).square() / 2).sum() * actor_dp_size / global_action_tokens\n                    )\n                else:\n                    raise NotImplementedError(f\"`kl_type` only supports `kl`, `unbias` or `mse`, but found {kl_type}.\")\n                _kl_penalty_losses.append(_kl_penalty_loss)\n\n                # ----------compute total loss------------\n                _loss = _positive_loss + _negative_loss + _kl_penalty_loss\n                _losses.append(_loss)\n\n                step_sum_gen_entropy += -_old_logprobs.sum().item()\n                step_sum_ref_kl += (_old_logprobs - _ref_logprobs).sum().item()\n                step_sum_adv += _judger_advantages.sum().item()\n                step_action_tokens += _num_action_tokens.item()\n\n            loss = sum(_losses)\n            if update_actor:\n                loss.backward()\n\n            # for logging\n            step_positive_loss += sum(_positive_losses).item()\n            step_negative_loss += sum(_negative_losses).item()\n            step_kl_penalty_loss += sum(_kl_penalty_losses).item()\n            step_rl_loss += loss.item()\n            step_rl_consumed_tokens += rl_num_tokens.sum() / actor_data_mesh.size()\n\n            if args.token_level_rm is not None:\n                token_level_rm_loss = sum(_token_level_rm_losses)\n                token_level_rm_loss.backward()\n                step_token_level_rm_loss += token_level_rm_loss.item()\n\n        step_rl_time = time.time() - step_rl_start_t\n        step_avg_ref_kl = step_sum_ref_kl / step_action_tokens\n        step_avg_gen_entropy = step_sum_gen_entropy / step_action_tokens\n        step_avg_adv = step_sum_adv / step_action_tokens\n\n        actor_data_group = actor_data_mesh.get_group()\n        step_avg_ref_kl = reduce_mean(step_avg_ref_kl, actor_data_group)\n        step_avg_gen_entropy = reduce_mean(step_avg_gen_entropy, actor_data_group)\n        step_avg_adv = reduce_mean(step_avg_adv, actor_data_group)\n        step_avg_new_tokens = reduce_mean(step_avg_new_tokens, actor_data_group)\n\n        if update_actor:\n            actor_grad_norm = actor_model.clip_grad_norm(args.max_grad_norm)\n            actor_grad_norm = actor_grad_norm.item()\n            actor_optimizer.step()\n            actor_optimizer.zero_grad()\n        else:\n            actor_grad_norm = 0\n\n        if args.token_level_rm is not None:\n            token_rm_grad_norm = token_level_rm.clip_grad_norm(args.max_grad_norm)\n            token_rm_grad_norm = token_rm_grad_norm.item()\n            token_rm_optimizer.step()\n            token_rm_optimizer.zero_grad()\n\n        step_time = time.time() - step_start_t\n        eta = step_time * (total_steps - step)\n        eta = timedelta(seconds=int(eta))\n\n        infer_tgs = int(step_infer_consumed_tokens / step_infer_time)\n        rl_tgs = int(step_rl_consumed_tokens / step_rl_time)\n\n        actor_lr = cur_lr if update_actor else 0.0\n        max_memory = DEVICE_MODULE.max_memory_allocated()\n        log_dict = {\n            \"step\": step + 1,\n            \"actor_lr\": actor_lr,\n            \"actor_grad_norm\": actor_grad_norm,\n            \"token_level_rm_lr\": token_rm_cur_lr if args.token_level_rm is not None else 0.0,\n            \"token_rm_grad_norm\": token_rm_grad_norm if args.token_level_rm is not None else 0.0,\n            \"avg_judger_reward\": step_avg_judger_reward,\n            \"avg_adv\": step_avg_adv,\n            \"avg_gen_entropy\": step_avg_gen_entropy,\n            \"avg_ref_kl\": step_avg_ref_kl,\n            \"positive_loss\": step_positive_loss,\n            \"negative_loss\": step_negative_loss,\n            \"kl_penalty_loss\": step_kl_penalty_loss,\n            \"rl_loss\": step_rl_loss,\n            \"token_level_rm_loss\": step_token_level_rm_loss if args.token_level_rm is not None else 0.0,\n            \"max_memory\": max_memory / 1024**3,\n            \"avg_new_tokens\": step_avg_new_tokens,\n            \"num_rl_tokens\": step_rl_consumed_tokens,\n            \"infer_tgs\": infer_tgs,\n            \"rl_tgs\": rl_tgs,\n            \"gen_time\": step_gen_time,\n            \"infer_time\": step_infer_time,\n            \"rl_time\": step_rl_time,\n            \"total_time\": step_time,\n            \"eta\": int(eta.total_seconds()),\n        }\n        for key, value in log_dict.items():\n            if isinstance(value, torch.Tensor):\n                log_dict[key] = value.item()\n        with open(os.path.join(args.work_dir, f\"rank{rank}.log.jsonl\"), \"a\") as f:\n            f.write(json.dumps(log_dict, ensure_ascii=False) + \"\\n\")\n\n        if is_interval(step, total_steps, args.log_interval):\n            logger.info(\n                \"[Train] Step \"\n                f\"{step + 1}/{total_steps}  \"\n                f\"actor_lr: {cur_lr:.3e}  \"\n                f\"actor_grad_norm: {actor_grad_norm:.3f}  \"\n                f\"token_level_rm_lr: {token_rm_cur_lr if args.token_level_rm is not None else 0.0:.3e}  \"\n                f\"token_rm_grad_norm: {token_rm_grad_norm if args.token_level_rm is not None else 0.0:.3f}  \"\n     
           f\"avg_judger_reward: {step_avg_judger_reward:.8f}  \"\n                f\"avg_adv: {step_avg_adv:.8f}  \"\n                f\"avg_gen_entropy: {step_avg_gen_entropy:.3f}  \"\n                f\"avg_ref_kl: {step_avg_ref_kl:.8f}  \"\n                f\"positive_loss: {step_positive_loss:.3f}  \"\n                f\"negative_loss: {step_negative_loss:.3f}  \"\n                f\"kl_penalty_loss: {step_kl_penalty_loss:.3f}  \"\n                f\"rl_loss: {step_rl_loss:.3f}  \"\n                f\"token_level_rm_loss: {step_token_level_rm_loss if args.token_level_rm is not None else 0.0:.3f}  \"\n                f\"kl_coef: {args.kl_coef:.5f}  \"\n                f\"max_memory: {(max_memory / 1024**3):.1f}GB  \"\n                f\"avg_new_tokens: {int(step_avg_new_tokens)}  \"\n                f\"num_rl_tokens: {int(step_rl_consumed_tokens)}  \"\n                f\"infer_tgs: {int(infer_tgs)}  \"\n                f\"rl_tgs: {int(rl_tgs)}  \"\n                f\"gen_time: {step_gen_time:.2f}s  \"\n                f\"infer_time: {step_infer_time:.2f}s  \"\n                f\"rl_time: {step_rl_time:.2f}s  \"\n                f\"total_time: {step_time:.2f}s  \"\n                f\"eta: {eta}\"\n            )\n\n        if is_interval(step, total_steps, checkpoint_interval):\n            DEVICE_MODULE.empty_cache()\n\n            num_digits = len(str(abs(total_steps)))\n            work_dir = args.work_dir\n            ckpt_dir = os.path.join(work_dir, f\"ckpt-{step+1:0{num_digits}}\")\n            hf_dir = os.path.join(work_dir, f\"hf-{step+1:0{num_digits}}\")\n\n            with profile_time_and_memory(\"[Checkpoint]\"):\n                actor_model.save_pretrained(hf_dir)\n                tokenizer.save_pretrained(hf_dir)\n\n                dist.barrier()\n\n    train_cost_time = time.time() - start_train_t\n    logger.success(f\"[Train] Cost {timedelta(seconds=int(train_cost_time))}\")\n    # ------------------------    Training  End  ---------------------------- #\n\n\nif __name__ == \"__main__\":\n    fire.Fire(train_oreal)\n"
  }
]